diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..9871f4ab --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "[m68k]": { + "editor.rulers": [] + }, + "C_Cpp.formatting": "clangFormat", + "C_Cpp.clang_format_style": "file", + "C_Cpp.clang_format_fallbackStyle": "Google" +} \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 20115c50..6bad58cd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,13 +19,22 @@ add_subdirectory(external/zlib) add_subdirectory(external/ADFlib) add_subdirectory(external/dear_imgui) add_subdirectory(external/capstone) +add_subdirectory(external/EASTL) if (APPLE OR LINUX OR UNIX) find_package(SDL2 REQUIRED) endif() + +add_definitions(-DEASTL_EASTDC_VSNPRINTF=0) + if (WIN32) add_definitions(-D_HAS_STD_BYTE=0) + if(MSVC) # Enable Edit and Continue for Debug builds + string(REGEX REPLACE "/Z[iI7]" "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /ZI") + add_link_options("/INCREMENTAL") + endif() endif() include_directories(uae_src/include uae_src src .) @@ -201,7 +210,8 @@ add_executable(quaesar # Recursively glob for all .cpp files in the src directory file(GLOB_RECURSE CROSS_FILES "src/*.cpp") -target_sources(quaesar PRIVATE ${CROSS_FILES}) +file(GLOB_RECURSE QUAE_HEADERS "src/*.h") +target_sources(quaesar PRIVATE ${CROSS_FILES} ${QUAE_HEADERS}) if (APPLE OR LINUX OR UNIX) target_compile_options(quaesar PRIVATE -DUAE=1 -D_cdecl= -DFILEFLAG_WRITE=1 -DOS_NAME=\"linux\") @@ -235,4 +245,11 @@ target_include_directories(quaesar PRIVATE "${CMAKE_SOURCE_DIR}/external/ADFlib/ target_include_directories(quaesar PRIVATE "${CMAKE_SOURCE_DIR}/external/dear_imgui") target_include_directories(quaesar PRIVATE "${CMAKE_SOURCE_DIR}/external") target_include_directories(quaesar PRIVATE "${CMAKE_SOURCE_DIR}/external/capstone/include") -target_link_libraries(quaesar PRIVATE ${SDL2_LIBRARIES} zlibstatic adf imgui capstone) +target_include_directories(quaesar PRIVATE "${CMAKE_SOURCE_DIR}/external/EASTL/include") +target_link_libraries(quaesar PRIVATE ${SDL2_LIBRARIES} zlibstatic adf imgui capstone EASTL) + + +if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/imgui.ini") + file(COPY_FILE "${CMAKE_CURRENT_LIST_DIR}/bin/install/default_layout.ini" "${CMAKE_CURRENT_BINARY_DIR}/imgui.ini") +endif() + diff --git a/bin/install/default_layout.ini b/bin/install/default_layout.ini new file mode 100644 index 00000000..977a6902 --- /dev/null +++ b/bin/install/default_layout.ini @@ -0,0 +1,74 @@ +[Window][Quaesar debugger] +Pos=0,17 +Size=1280,703 +Collapsed=0 + +[Window][Debug##Default] +Pos=60,60 +Size=400,400 +Collapsed=0 + +[Window][Disassembly] +Pos=8,42 +Size=320,670 +Collapsed=0 +DockId=0x00000001,0 + +[Window][Registers] +Pos=1011,42 +Size=261,495 +Collapsed=0 +DockId=0x0000000A,0 + +[Window][Console] +Pos=330,539 +Size=942,173 +Collapsed=0 +DockId=0x00000003,1 + +[Window][Screen] +Pos=330,42 +Size=679,495 +Collapsed=0 +DockId=0x00000009,0 + +[Window][Palette] +Pos=1011,42 +Size=261,495 +Collapsed=0 +DockId=0x0000000A,1 + +[Window][Memory graph] +Pos=330,42 +Size=679,495 +Collapsed=0 +DockId=0x00000009,1 + +[Window][Memory] +Pos=330,539 +Size=942,173 +Collapsed=0 +DockId=0x00000003,0 + +[Table][0x6038E043,3] +RefScale=13 +Column 0 Width=70 +Column 1 Width=84 +Column 2 Width=175 + +[Table][0xF68F8465,4] +RefScale=13 +Column 0 Width=14 +Column 1 Width=70 +Column 2 Width=14 +Column 3 Width=70 + +[Docking][Data] +DockSpace ID=0x7A095824 Window=0xBB552D6F 
Pos=8,42 Size=1264,670 Split=X + DockNode ID=0x00000001 Parent=0x7A095824 SizeRef=320,670 Selected=0x19EDEE42 + DockNode ID=0x00000004 Parent=0x7A095824 SizeRef=942,670 Split=Y + DockNode ID=0x00000002 Parent=0x00000004 SizeRef=1264,495 Split=X Selected=0x5B855092 + DockNode ID=0x00000009 Parent=0x00000002 SizeRef=679,262 Selected=0x5B855092 + DockNode ID=0x0000000A Parent=0x00000002 SizeRef=261,262 Selected=0xEAEE9E08 + DockNode ID=0x00000003 Parent=0x00000004 SizeRef=1264,173 CentralNode=1 Selected=0xB2805FDB + diff --git a/external/EASTL/CMakeLists.txt b/external/EASTL/CMakeLists.txt new file mode 100644 index 00000000..22fb1d96 --- /dev/null +++ b/external/EASTL/CMakeLists.txt @@ -0,0 +1,106 @@ +#------------------------------------------------------------------------------------------- +# Copyright (C) Electronic Arts Inc. All rights reserved. +#------------------------------------------------------------------------------------------- +cmake_minimum_required(VERSION 3.15) +#include(FetchContent) +project(EASTL CXX) + +#------------------------------------------------------------------------------------------- +# Options +#------------------------------------------------------------------------------------------- +option(EASTL_BUILD_BENCHMARK "Enable generation of build files for benchmark" OFF) +option(EASTL_BUILD_TESTS "Enable generation of build files for tests" OFF) +option(EASTL_STD_ITERATOR_CATEGORY_ENABLED "Enable compatibility with std:: iterator categories" OFF) + + +option(EASTL_DISABLE_APRIL_2024_DEPRECATIONS "Enable use of API marked for removal in April 2024." OFF) +option(EASTL_DISABLE_SEPT_2024_DEPRECATIONS "Enable use of API marked for removal in September 2024." OFF) +option(EASTL_DISABLE_APRIL_2025_DEPRECATIONS "Enable use of API marked for removal in April 2025." 
OFF) + +#------------------------------------------------------------------------------------------- +# Compiler Flags +#------------------------------------------------------------------------------------------- +set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/scripts/CMake") +#include(CommonCppFlags) + +#------------------------------------------------------------------------------------------- +# Library definition +#------------------------------------------------------------------------------------------- +file(GLOB EASTL_SOURCES "source/*.cpp") +file(GLOB_RECURSE EASTL_HEADERS "include/EASTL/**.h") +add_library(EASTL ${EASTL_SOURCES} ${EASTL_HEADERS}) +target_compile_features(EASTL PUBLIC cxx_std_14) + +# include both source and headers in the files tab in Visual Studio +source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} PREFIX "Header Files" FILES ${EASTL_HEADERS}) + +if (MSVC) + set(EASTL_NATVIS_DIR "doc") + set(EASTL_NATVIS_FILE "${EASTL_NATVIS_DIR}/EASTL.natvis") + target_sources(EASTL INTERFACE + $ + $ + ) +endif() + +if(EASTL_BUILD_BENCHMARK) + add_subdirectory(benchmark) +endif() + +if(EASTL_BUILD_TESTS) + add_subdirectory(test) +endif() + +#------------------------------------------------------------------------------------------- +# Defines +#------------------------------------------------------------------------------------------- +add_definitions(-D_CHAR16T) +add_definitions(-D_CRT_SECURE_NO_WARNINGS) +add_definitions(-D_SCL_SECURE_NO_WARNINGS) +add_definitions(-DEASTL_OPENSOURCE=1) +if (EASTL_STD_ITERATOR_CATEGORY_ENABLED) + add_definitions(-DEASTL_STD_ITERATOR_CATEGORY_ENABLED=1) +endif() + +#------------------------------------------------------------------------------------------- +# Include dirs +#------------------------------------------------------------------------------------------- +target_include_directories(EASTL PUBLIC include) + +#------------------------------------------------------------------------------------------- +# Dependencies +#------------------------------------------------------------------------------------------- +#FetchContent_Declare( +# EABase +# GIT_REPOSITORY https://github.com/electronicarts/EABase.git +# GIT_TAG 123363eb82e132c0181ac53e43226d8ee76dea12 +# GIT_SUBMODULES "" # This should be temporary until we update the cyclic submodule dependencies in EABase. 
+#)

+#FetchContent_MakeAvailable(EABase)

+#target_link_libraries(EASTL EABase)

+#-------------------------------------------------------------------------------------------
+# Deprecations
+#-------------------------------------------------------------------------------------------
+if(EASTL_DISABLE_APRIL_2024_DEPRECATIONS)
+    target_compile_definitions(EASTL PUBLIC EA_DEPRECATIONS_FOR_2024_APRIL=EA_DISABLED)
+endif()
+if(EASTL_DISABLE_SEPT_2024_DEPRECATIONS)
+    target_compile_definitions(EASTL PUBLIC EA_DEPRECATIONS_FOR_2024_SEPT=EA_DISABLED)
+endif()
+if(EASTL_DISABLE_APRIL_2025_DEPRECATIONS)
+    target_compile_definitions(EASTL PUBLIC EA_DEPRECATIONS_FOR_2025_APRIL=EA_DISABLED)
+endif()
+
+
+#-------------------------------------------------------------------------------------------
+# Installation
+#-------------------------------------------------------------------------------------------
+install(TARGETS EASTL DESTINATION lib)
+install(DIRECTORY include/EASTL DESTINATION include)
+
+if (MSVC)
+    install(FILES ${EASTL_NATVIS_FILE} DESTINATION ${EASTL_NATVIS_DIR})
+endif()
diff --git a/external/EASTL/LICENSE b/external/EASTL/LICENSE
new file mode 100644
index 00000000..1b112db6
--- /dev/null
+++ b/external/EASTL/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Electronic Arts
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/external/EASTL/doc/EASTL.natvis b/external/EASTL/doc/EASTL.natvis
new file mode 100644
index 00000000..e1ec96dc
--- /dev/null
+++ b/external/EASTL/doc/EASTL.natvis
@@ -0,0 +1,761 @@
[761 lines of natvis XML: the <AutoVisualizer>/<Type>/<DisplayString>/<Expand> markup did not survive extraction and only the display expressions remain. The file adds Visual Studio debugger visualizers for eastl::unique_ptr, shared_ptr/weak_ptr, array, basic_string (SSO and heap layouts, narrow and wide), pair, vector, deque and its iterator, the queue/stack adaptors, list/ListNode, slist/SListNode, intrusive_list, the rbtree-based set/map family, the hashtable-based hash_set/hash_map family, reverse_iterator, bitset, span, compressed_pair, optional, chrono durations, function, any, atomic, atomic_flag, variant, and tuple.]
diff --git a/external/EASTL/include/EABase/config/eacompiler.h b/external/EASTL/include/EABase/config/eacompiler.h
new file mode 100644
index 00000000..b75343c7
--- /dev/null
+++ b/external/EASTL/include/EABase/config/eacompiler.h
@@ -0,0 +1,1778 @@
+/*-----------------------------------------------------------------------------
+ * config/eacompiler.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *-----------------------------------------------------------------------------
+ * Currently supported defines include:
+ *     EA_COMPILER_GNUC
+ *     EA_COMPILER_ARM
+ *     EA_COMPILER_EDG
+ *     EA_COMPILER_SN
+ *     EA_COMPILER_MSVC
+ *     EA_COMPILER_METROWERKS
+ *     EA_COMPILER_INTEL
+ *     EA_COMPILER_BORLANDC
+ *     EA_COMPILER_IBM
+ *     EA_COMPILER_QNX
+ *     EA_COMPILER_GREEN_HILLS
+ *     EA_COMPILER_CLANG
+ *     EA_COMPILER_CLANG_CL
+ *
+ *     EA_COMPILER_VERSION = <integer>
+ *     EA_COMPILER_NAME = <string>
+ *     EA_COMPILER_STRING = <string>
+ *
+ *     EA_COMPILER_VA_COPY_REQUIRED
+ *
+ *  C++98/03 functionality
+ *     EA_COMPILER_NO_STATIC_CONSTANTS
+ *     EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ *     EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ *     EA_COMPILER_NO_MEMBER_TEMPLATES
+ *     EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ *     EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ *     EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ *     EA_COMPILER_NO_VOID_RETURNS
+ *     EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ *     EA_COMPILER_NO_DEDUCED_TYPENAME
+ *     EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ *     EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ *     EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ *     EA_COMPILER_NO_RTTI
+ *     EA_COMPILER_NO_EXCEPTIONS
+ *     EA_COMPILER_NO_NEW_THROW_SPEC
+ *     EA_THROW_SPEC_NEW / EA_THROW_SPEC_DELETE
+ *     EA_COMPILER_NO_UNWIND
+ *     EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ *     EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ *     EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ *     EA_COMPILER_NO_VARIADIC_MACROS
+ *
+ *  C++11 functionality
+ *     EA_COMPILER_NO_RVALUE_REFERENCES
+ *     EA_COMPILER_NO_EXTERN_TEMPLATE
+ *     EA_COMPILER_NO_RANGE_BASED_FOR_LOOP
+ *     EA_COMPILER_NO_CONSTEXPR
+ *     EA_COMPILER_NO_OVERRIDE
+ *     EA_COMPILER_NO_INHERITANCE_FINAL
+ *     EA_COMPILER_NO_NULLPTR
+ *     EA_COMPILER_NO_AUTO
+ *     EA_COMPILER_NO_DECLTYPE
+ *     EA_COMPILER_NO_DEFAULTED_FUNCTIONS
+ *     EA_COMPILER_NO_DELETED_FUNCTIONS
+ *     EA_COMPILER_NO_LAMBDA_EXPRESSIONS
+ *     EA_COMPILER_NO_TRAILING_RETURN_TYPES
+ *     EA_COMPILER_NO_STRONGLY_TYPED_ENUMS
+ *     EA_COMPILER_NO_FORWARD_DECLARED_ENUMS
+ *     EA_COMPILER_NO_VARIADIC_TEMPLATES
+ *     EA_COMPILER_NO_TEMPLATE_ALIASES
+ *     EA_COMPILER_NO_INITIALIZER_LISTS
+ *     EA_COMPILER_NO_NORETURN
+ *     EA_COMPILER_NO_CARRIES_DEPENDENCY
+ *     EA_COMPILER_NO_FALLTHROUGH
+ *     EA_COMPILER_NO_NODISCARD
+ *     EA_COMPILER_NO_MAYBE_UNUSED
+ *     EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS
+ *     EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS
+ *     EA_COMPILER_NO_ALIGNOF
+ *     EA_COMPILER_NO_ALIGNAS
+ *     EA_COMPILER_NO_DELEGATING_CONSTRUCTORS
+ *     EA_COMPILER_NO_INHERITING_CONSTRUCTORS
+ *     EA_COMPILER_NO_USER_DEFINED_LITERALS
+ *     EA_COMPILER_NO_STANDARD_LAYOUT_TYPES
+ *     EA_COMPILER_NO_EXTENDED_SIZEOF
+ *     EA_COMPILER_NO_INLINE_NAMESPACES
+ *     EA_COMPILER_NO_UNRESTRICTED_UNIONS
+ *     EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS
+ *     EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS
+ *     EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS
+ *     EA_COMPILER_NO_NOEXCEPT
+ *     EA_COMPILER_NO_RAW_LITERALS
+ *     EA_COMPILER_NO_UNICODE_STRING_LITERALS
+ *     EA_COMPILER_NO_NEW_CHARACTER_TYPES
+ *     EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS
+ *     EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX
+ *     EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS
+ *
+ *  C++14 functionality
+ *     EA_COMPILER_NO_VARIABLE_TEMPLATES
+ *
+ *  C++17 functionality
+ *     EA_COMPILER_NO_INLINE_VARIABLES
+ *     EA_COMPILER_NO_ALIGNED_NEW
+ *
+ *  C++20 functionality
+ *     EA_COMPILER_NO_DESIGNATED_INITIALIZERS
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * Supplemental documentation
+ *     EA_COMPILER_NO_STATIC_CONSTANTS
+ *         Code such as this is legal, but some compilers fail to compile it:
+ *             struct A{ static const a = 1; };
+ *
+ *     EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ *         Some compilers fail to allow template specialization, such as with this:
+ *             template <class U> void DoSomething(U u);
+ *             void DoSomething(int x);
+ *
+ *     EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ *         Some compilers fail to allow partial template specialization, such as with this:
+ *             template <class T, class Allocator> class vector{ };          // Primary templated class.
+ *             template <class Allocator> class vector<bool, Allocator>{ };  // Partially specialized version.
+ *
+ *     EA_COMPILER_NO_MEMBER_TEMPLATES
+ *         Some compilers fail to allow member template functions such as this:
+ *             struct A{ template <class U> void DoSomething(U u); };
+ *
+ *     EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ *         Some compilers fail to allow member template specialization, such as with this:
+ *             struct A{
+ *                 template <class U> void DoSomething(U u);
+ *                 void DoSomething(int x);
+ *             };
+ *
+ *     EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ *         Code such as this is legal:
+ *             template <typename T, template <typename> class U>
+ *             U<T> SomeFunction(const U<T> x) { return x.DoSomething(); }
+ *
+ *     EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ *         Some compilers fail to compile templated friends, as with this:
+ *             struct A{ template <class U> friend class SomeFriend; };
+ *         This is described in the C++ Standard at 14.5.3.
+ *
+ *     EA_COMPILER_NO_VOID_RETURNS
+ *          This is legal C++:
+ *             void DoNothing1(){ };
+ *             void DoNothing2(){ return DoNothing1(); }
+ *
+ *     EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ *         See the C++ standard sec 10.3,p5.
+ *
+ *     EA_COMPILER_NO_DEDUCED_TYPENAME
+ *         Some compilers don't support the use of 'typename' for
+ *         dependent types in deduced contexts, as with this:
+ *             template <class T> void Function(T, typename T::type);
+ *
+ *     EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ *         Also known as Koenig lookup. Basically, if you have a function
+ *         that is in a namespace and you call that function without prefixing
+ *         it with the namespace the compiler should look at any arguments
+ *         you pass to that function call and search their namespace *first*
+ *         to see if the given function exists there.
+ *
+ *     EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ *         <exception> is in namespace std. Some std libraries fail to
+ *         put the contents of <exception> in namespace std. The following
+ *         code should normally be legal:
+ *             void Function(){ std::terminate(); }
+ *
+ *     EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ *         Some compilers fail to execute DoSomething() properly, though they
+ *         succeed in compiling it, as with this:
+ *             template <int i>
+ *             bool DoSomething(int j){ return i == j; };
+ *             DoSomething<1>(2);
+ *
+ *     EA_COMPILER_NO_EXCEPTIONS
+ *         The compiler is configured to disallow the use of try/throw/catch
+ *         syntax (often to improve performance). Use of such syntax in this
+ *         case will cause a compilation error.
+ *
+ *     EA_COMPILER_NO_UNWIND
+ *         The compiler is configured to allow the use of try/throw/catch
+ *         syntax and behaviour but disables the generation of stack unwinding
+ *         code for responding to exceptions (often to improve performance).
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifndef INCLUDED_eacompiler_H
+#define INCLUDED_eacompiler_H
+
+    #include <EABase/config/eaplatform.h>
+
+    // Note: This is used to generate the EA_COMPILER_STRING macros
+    #ifndef INTERNAL_STRINGIZE
+        #define INTERNAL_STRINGIZE(x) INTERNAL_PRIMITIVE_STRINGIZE(x)
+    #endif
+    #ifndef INTERNAL_PRIMITIVE_STRINGIZE
+        #define INTERNAL_PRIMITIVE_STRINGIZE(x) #x
+    #endif
+
+    // EA_COMPILER_HAS_FEATURE
+    #ifndef EA_COMPILER_HAS_FEATURE
+        #if defined(__clang__)
+            #define EA_COMPILER_HAS_FEATURE(x) __has_feature(x)
+        #else
+            #define EA_COMPILER_HAS_FEATURE(x) 0
+        #endif
+    #endif
+
+
+    // EA_COMPILER_HAS_BUILTIN
+    #ifndef EA_COMPILER_HAS_BUILTIN
+        #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 10))
+            #define EA_COMPILER_HAS_BUILTIN(x) __has_builtin(x)
+        #else
+            #define EA_COMPILER_HAS_BUILTIN(x) 0
+        #endif
+    #endif
+
+
+    // EDG (EDG compiler front-end, used by other compilers such as SN)
+    #if defined(__EDG_VERSION__)
+        #define EA_COMPILER_EDG 1
+
+        #if defined(_MSC_VER)
+            #define EA_COMPILER_EDG_VC_MODE 1
+        #endif
+        #if defined(__GNUC__)
+            #define EA_COMPILER_EDG_GCC_MODE 1
+        #endif
+    #endif
+
+    // EA_COMPILER_WINRTCX_ENABLED
+    //
+    // Defined as 1 if the compiler has its available C++/CX support enabled, else undefined.
+    // This specifically means the corresponding compilation unit has been built with Windows Runtime
+    // Components enabled, usually via the '-ZW' compiler flags being used. This option allows for using
+    // ref counted hat-type '^' objects and other C++/CX specific keywords like "ref new"
+    #if !defined(EA_COMPILER_WINRTCX_ENABLED) && defined(__cplusplus_winrt)
+        #define EA_COMPILER_WINRTCX_ENABLED 1
+    #endif
+
+
+    // EA_COMPILER_CPP11_ENABLED
+    //
+    // Defined as 1 if the compiler has its available C++11 support enabled, else undefined.
+    // This does not mean that all of C++11 or any particular feature of C++11 is supported
+    // by the compiler. It means that whatever C++11 support the compiler has is enabled.
+    // This also includes existing and older compilers that still identify C++11 as C++0x.
+    //
+    // We cannot use (__cplusplus >= 201103L) alone because some compiler vendors have
+    // decided to not define __cplusplus like thus until they have fully completed their
+    // C++11 support.
+    //
+    #if !defined(EA_COMPILER_CPP11_ENABLED) && defined(__cplusplus)
+        #if (__cplusplus >= 201103L)    // Clang and GCC defines this like so in C++11 mode.
+            #define EA_COMPILER_CPP11_ENABLED 1
+        #elif defined(__GNUC__) && defined(__GXX_EXPERIMENTAL_CXX0X__)
+            #define EA_COMPILER_CPP11_ENABLED 1
+        #elif defined(_MSC_VER) && _MSC_VER >= 1600    // Microsoft unilaterally enables its C++11 support; there is no way to disable it.
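A minimal sketch of how downstream code typically keys off this macro once it is set (MYLIB_MOVE and the surrounding names are illustrative, not part of EABase):

    #include <EABase/config/eacompiler.h>

    // Use a real move cast where rvalue references exist; fall back to a copy otherwise.
    #if defined(EA_COMPILER_CPP11_ENABLED)
        #define MYLIB_MOVE(x) static_cast<decltype(x)&&>(x)
    #else
        #define MYLIB_MOVE(x) (x)
    #endif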
+ #define EA_COMPILER_CPP11_ENABLED 1 + #elif defined(__EDG_VERSION__) // && ??? + // To do: Is there a generic way to determine this? + #endif + #endif + + + // EA_COMPILER_CPP14_ENABLED + // + // Defined as 1 if the compiler has its available C++14 support enabled, else undefined. + // This does not mean that all of C++14 or any particular feature of C++14 is supported + // by the compiler. It means that whatever C++14 support the compiler has is enabled. + // + // We cannot use (__cplusplus >= 201402L) alone because some compiler vendors have + // decided to not define __cplusplus like thus until they have fully completed their + // C++14 support. + #if !defined(EA_COMPILER_CPP14_ENABLED) && defined(__cplusplus) + #if (__cplusplus >= 201402L) // Clang and GCC defines this like so in C++14 mode. + #define EA_COMPILER_CPP14_ENABLED 1 + #elif defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ + #define EA_COMPILER_CPP14_ENABLED 1 + #endif + #endif + + + // EA_COMPILER_CPP17_ENABLED + // + // Defined as 1 if the compiler has its available C++17 support enabled, else undefined. + // This does not mean that all of C++17 or any particular feature of C++17 is supported + // by the compiler. It means that whatever C++17 support the compiler has is enabled. + // + // We cannot use (__cplusplus >= 201703L) alone because some compiler vendors have + // decided to not define __cplusplus like thus until they have fully completed their + // C++17 support. + #if !defined(EA_COMPILER_CPP17_ENABLED) && defined(__cplusplus) + #if (__cplusplus >= 201703L) + #define EA_COMPILER_CPP17_ENABLED 1 + #elif defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L) // C++17+ + #define EA_COMPILER_CPP17_ENABLED 1 + #endif + #endif + + + // EA_COMPILER_CPP20_ENABLED + // + // Defined as 1 if the compiler has its available C++20 support enabled, else undefined. + // This does not mean that all of C++20 or any particular feature of C++20 is supported + // by the compiler. It means that whatever C++20 support the compiler has is enabled. + // + // We cannot use (__cplusplus >= 202003L) alone because some compiler vendors have + // decided to not define __cplusplus like thus until they have fully completed their + // C++20 support. + #if !defined(EA_COMPILER_CPP20_ENABLED) && defined(__cplusplus) + // TODO(rparoin): enable once a C++20 value for the __cplusplus macro has been published + // #if (__cplusplus >= 202003L) + // #define EA_COMPILER_CPP20_ENABLED 1 + // #elif defined(_MSVC_LANG) && (_MSVC_LANG >= 202003L) // C++20+ + // #define EA_COMPILER_CPP20_ENABLED 1 + // #endif + #endif + + + + #if defined(__ARMCC_VERSION) + // Note that this refers to the ARM RVCT compiler (armcc or armcpp), but there + // are other compilers that target ARM processors, such as GCC and Microsoft VC++. + // If you want to detect compiling for the ARM processor, check for EA_PROCESSOR_ARM + // being defined. + // This compiler is also identified by defined(__CC_ARM) || defined(__ARMCC__). + #define EA_COMPILER_RVCT 1 + #define EA_COMPILER_ARM 1 + #define EA_COMPILER_VERSION __ARMCC_VERSION + #define EA_COMPILER_NAME "RVCT" + //#define EA_COMPILER_STRING (defined below) + + // Clang's GCC-compatible driver. + #elif defined(__clang__) && !defined(_MSC_VER) + #define EA_COMPILER_CLANG 1 + #define EA_COMPILER_VERSION (__clang_major__ * 100 + __clang_minor__) + #define EA_COMPILER_NAME "clang" + #define EA_COMPILER_STRING EA_COMPILER_NAME __clang_version__ + + // GCC (a.k.a. GNUC) + #elif defined(__GNUC__) // GCC compilers exist for many platforms. 
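Note that EA_COMPILER_VERSION is encoded differently per family (clang just above: major*100 + minor; GCC just below: major*1000 + minor), so version gates must always pair the family macro with the version check. A hedged sketch of typical use, where the 3.5/4.8 cutoffs and the MYLIB_ macro are arbitrary examples:

    #if defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 305)      // clang 3.5+ (3*100 + 5)
        #define MYLIB_HAS_EXTRA_DIAGNOSTICS 1
    #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008)    // GCC 4.8+ (4*1000 + 8)
        #define MYLIB_HAS_EXTRA_DIAGNOSTICS 1
    #endif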
+ #define EA_COMPILER_GNUC 1 + #define EA_COMPILER_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) + #define EA_COMPILER_NAME "GCC" + #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( __GNUC__ ) "." INTERNAL_STRINGIZE( __GNUC_MINOR__ ) + + #if (__GNUC__ == 2) && (__GNUC_MINOR__ < 95) // If GCC < 2.95... + #define EA_COMPILER_NO_MEMBER_TEMPLATES 1 + #endif + #if (__GNUC__ == 2) && (__GNUC_MINOR__ <= 97) // If GCC <= 2.97... + #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 + #endif + #if (__GNUC__ == 3) && ((__GNUC_MINOR__ == 1) || (__GNUC_MINOR__ == 2)) // If GCC 3.1 or 3.2 (but not pre 3.1 or post 3.2)... + #define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS 1 + #endif + + // Borland C++ + #elif defined(__BORLANDC__) + #define EA_COMPILER_BORLANDC 1 + #define EA_COMPILER_VERSION __BORLANDC__ + #define EA_COMPILER_NAME "Borland C" + //#define EA_COMPILER_STRING (defined below) + + #if (__BORLANDC__ <= 0x0550) // If Borland C++ Builder 4 and 5... + #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 + #endif + #if (__BORLANDC__ >= 0x561) && (__BORLANDC__ < 0x600) + #define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION 1 + #endif + + + // Intel C++ + // The Intel Windows compiler masquerades as VC++ and defines _MSC_VER. + // The Intel compiler is based on the EDG compiler front-end. + #elif defined(__ICL) || defined(__ICC) + #define EA_COMPILER_INTEL 1 + + // Should we enable the following? We probably should do so since enabling it does a lot more good than harm + // for users. The Intel Windows compiler does a pretty good job of emulating VC++ and so the user would likely + // have to handle few special cases where the Intel compiler doesn't emulate VC++ correctly. + #if defined(_MSC_VER) + #define EA_COMPILER_MSVC 1 + #define EA_COMPILER_MICROSOFT 1 + #endif + + // Should we enable the following? This isn't as clear because as of this writing we don't know if the Intel + // compiler truly emulates GCC well enough that enabling this does more good than harm. + #if defined(__GNUC__) + #define EA_COMPILER_GNUC 1 + #endif + + #if defined(__ICL) + #define EA_COMPILER_VERSION __ICL + #elif defined(__ICC) + #define EA_COMPILER_VERSION __ICC + #endif + #define EA_COMPILER_NAME "Intel C++" + #if defined(_MSC_VER) + #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ ) ", VC++ version " INTERNAL_STRINGIZE( _MSC_VER ) + #elif defined(__GNUC__) + #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ ) ", GCC version " INTERNAL_STRINGIZE( __GNUC__ ) + #else + #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ ) + #endif + + + #elif defined(_MSC_VER) + #define EA_COMPILER_MSVC 1 + #define EA_COMPILER_MICROSOFT 1 + #define EA_COMPILER_VERSION _MSC_VER + #define EA_COMPILER_NAME "Microsoft Visual C++" + //#define EA_COMPILER_STRING (defined below) + + #if defined(__clang__) + // Clang's MSVC-compatible driver. + #define EA_COMPILER_CLANG_CL 1 + #endif + + #define EA_STANDARD_LIBRARY_MSVC 1 + #define EA_STANDARD_LIBRARY_MICROSOFT 1 + + #if (_MSC_VER <= 1200) // If VC6.x and earlier... + #if (_MSC_VER < 1200) + #define EA_COMPILER_MSVCOLD 1 + #else + #define EA_COMPILER_MSVC6 1 + #endif + + #if (_MSC_VER < 1200) // If VC5.x or earlier... 
+ #define EA_COMPILER_NO_TEMPLATE_SPECIALIZATION 1 + #endif + #define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.0 + #define EA_COMPILER_NO_VOID_RETURNS 1 // The compiler fails to compile such cases. Fixed in VC7.0 + #define EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE 1 // The compiler fails to compile such cases. Fixed in VC7.0 + #define EA_COMPILER_NO_DEDUCED_TYPENAME 1 // The compiler fails to compile such cases. Fixed in VC7.0 + #define EA_COMPILER_NO_STATIC_CONSTANTS 1 // The compiler fails to compile such cases. Fixed in VC7.0 + #define EA_COMPILER_NO_COVARIANT_RETURN_TYPE 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1 + #define EA_COMPILER_NO_TEMPLATE_TEMPLATES 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 // The compiler fails to compile such cases. Fixed in VC7.1 + //#define EA_COMPILER_NO_MEMBER_TEMPLATES 1 // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%? + //#define EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION 1 // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%? + + #elif (_MSC_VER <= 1300) // If VC7.0 and earlier... + #define EA_COMPILER_MSVC7 1 + + #define EA_COMPILER_NO_COVARIANT_RETURN_TYPE 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP 1 // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1 + #define EA_COMPILER_NO_TEMPLATE_TEMPLATES 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS 1 // The compiler fails to compile such cases. Fixed in VC7.1 + #define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION 1 // This is the case only for VC7.0 and not VC6 or VC7.1+. Fixed in VC7.1 + //#define EA_COMPILER_NO_MEMBER_TEMPLATES 1 // VC7.0 supports member templates properly 95% of the time. So do we flag the remaining 5%? + + #elif (_MSC_VER < 1400) // VS2003 _MSC_VER of 1300 means VC7 (VS2003) + // The VC7.1 and later compiler is fairly close to the C++ standard + // and thus has no compiler limitations that we are concerned about. + #define EA_COMPILER_MSVC7_2003 1 + #define EA_COMPILER_MSVC7_1 1 + + #elif (_MSC_VER < 1500) // VS2005 _MSC_VER of 1400 means VC8 (VS2005) + #define EA_COMPILER_MSVC8_2005 1 + #define EA_COMPILER_MSVC8_0 1 + + #elif (_MSC_VER < 1600) // VS2008. _MSC_VER of 1500 means VC9 (VS2008) + #define EA_COMPILER_MSVC9_2008 1 + #define EA_COMPILER_MSVC9_0 1 + + #elif (_MSC_VER < 1700) // VS2010 _MSC_VER of 1600 means VC10 (VS2010) + #define EA_COMPILER_MSVC_2010 1 + #define EA_COMPILER_MSVC10_0 1 + + #elif (_MSC_VER < 1800) // VS2012 _MSC_VER of 1700 means VS2011/VS2012 + #define EA_COMPILER_MSVC_2011 1 // Microsoft changed the name to VS2012 before shipping, despite referring to it as VS2011 up to just a few weeks before shipping. 
+ #define EA_COMPILER_MSVC11_0 1 + #define EA_COMPILER_MSVC_2012 1 + #define EA_COMPILER_MSVC12_0 1 + + #elif (_MSC_VER < 1900) // VS2013 _MSC_VER of 1800 means VS2013 + #define EA_COMPILER_MSVC_2013 1 + #define EA_COMPILER_MSVC13_0 1 + + #elif (_MSC_VER < 1910) // VS2015 _MSC_VER of 1900 means VS2015 + #define EA_COMPILER_MSVC_2015 1 + #define EA_COMPILER_MSVC14_0 1 + + #elif (_MSC_VER < 1911) // VS2017 _MSC_VER of 1910 means VS2017 + #define EA_COMPILER_MSVC_2017 1 + #define EA_COMPILER_MSVC15_0 1 + + #endif + + + // IBM + #elif defined(__xlC__) + #define EA_COMPILER_IBM 1 + #define EA_COMPILER_NAME "IBM XL C" + #define EA_COMPILER_VERSION __xlC__ + #define EA_COMPILER_STRING "IBM XL C compiler, version " INTERNAL_STRINGIZE( __xlC__ ) + + // Unknown + #else // Else the compiler is unknown + + #define EA_COMPILER_VERSION 0 + #define EA_COMPILER_NAME "Unknown" + + #endif + + #ifndef EA_COMPILER_STRING + #define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE(EA_COMPILER_VERSION) + #endif + + + // Deprecated definitions + // For backwards compatibility, should be supported for at least the life of EABase v2.0.x. + #ifndef EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION + #define EA_COMPILER_PARTIAL_TEMPLATE_SPECIALIZATION 1 + #endif + #ifndef EA_COMPILER_NO_TEMPLATE_SPECIALIZATION + #define EA_COMPILER_TEMPLATE_SPECIALIZATION 1 + #endif + #ifndef EA_COMPILER_NO_MEMBER_TEMPLATES + #define EA_COMPILER_MEMBER_TEMPLATES 1 + #endif + #ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION + #define EA_COMPILER_MEMBER_TEMPLATE_SPECIALIZATION 1 + #endif + + + + /////////////////////////////////////////////////////////////////////////////// + // EA_COMPILER_VA_COPY_REQUIRED + // + // Defines whether va_copy must be used to copy or save va_list objects between uses. + // Some compilers on some platforms implement va_list whereby its contents + // are destroyed upon usage, even if passed by value to another function. + // With these compilers you can use va_copy to save and restore a va_list. + // Known compiler/platforms that destroy va_list contents upon usage include: + // CodeWarrior on PowerPC + // GCC on x86-64 + // However, va_copy is part of the C99 standard and not part of earlier C and + // C++ standards. So not all compilers support it. VC++ doesn't support va_copy, + // but it turns out that VC++ doesn't usually need it on the platforms it supports, + // and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++. + /////////////////////////////////////////////////////////////////////////////// + + #ifndef EA_COMPILER_VA_COPY_REQUIRED + #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__) + #define EA_COMPILER_VA_COPY_REQUIRED 1 + #endif + #endif + + + // EA_COMPILER_NO_RTTI + // + // If EA_COMPILER_NO_RTTI is defined, then RTTI (run-time type information) + // is not available (possibly due to being disabled by the user). 
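A small sketch of how code portably degrades when this macro ends up defined (TypeNameOrFallback is illustrative, not an EABase API):

    #include <EABase/config/eacompiler.h>
    #if !defined(EA_COMPILER_NO_RTTI)
        #include <typeinfo>
    #endif

    template <typename T>
    const char* TypeNameOrFallback(const T& obj)
    {
    #if !defined(EA_COMPILER_NO_RTTI)
        return typeid(obj).name();   // RTTI available: implementation-defined (often mangled) name
    #else
        (void)obj;                   // RTTI compiled out: degrade gracefully
        return "unknown";
    #endif
    }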
+    //
+    #if defined(__EDG_VERSION__) && !defined(__RTTI)
+        #define EA_COMPILER_NO_RTTI 1
+    #elif defined(__clang__) && !EA_COMPILER_HAS_FEATURE(cxx_rtti)
+        #define EA_COMPILER_NO_RTTI 1
+    #elif defined(__IBMCPP__) && !defined(__RTTI_ALL__)
+        #define EA_COMPILER_NO_RTTI 1
+    #elif defined(__GXX_ABI_VERSION) && !defined(__GXX_RTTI)
+        #define EA_COMPILER_NO_RTTI 1
+    #elif defined(_MSC_VER) && !defined(_CPPRTTI)
+        #define EA_COMPILER_NO_RTTI 1
+    #elif defined(__ARMCC_VERSION) && defined(__TARGET_CPU_MPCORE) && !defined(__RTTI)
+        #define EA_COMPILER_NO_RTTI 1
+    #endif
+
+
+
+    // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+    //
+    // If EA_COMPILER_NO_EXCEPTIONS is defined, then the compiler is
+    // configured to not recognize C++ exception-handling statements
+    // such as try/catch/throw. Thus, when EA_COMPILER_NO_EXCEPTIONS is
+    // defined, code that attempts to use exception handling statements
+    // will usually cause a compilation error. It is often desirable
+    // for projects to disable exception handling because exception
+    // handling causes extra code and/or data generation which might
+    // not be needed, especially if it is known that exceptions won't
+    // be happening. When writing code that is to be portable between
+    // systems of which some enable exception handling while others
+    // don't, check for EA_COMPILER_NO_EXCEPTIONS being defined.
+    //
+    #if !defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_NO_UNWIND)
+        #if defined(EA_COMPILER_GNUC) && defined(_NO_EX) // GCC on some platforms defines _NO_EX when exceptions are disabled.
+            #define EA_COMPILER_NO_EXCEPTIONS 1
+
+        #elif (defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_INTEL) || defined(EA_COMPILER_RVCT)) && !defined(__EXCEPTIONS) // GCC and most EDG-based compilers define __EXCEPTIONS when exception handling is enabled.
+            #define EA_COMPILER_NO_EXCEPTIONS 1
+
+        #elif (defined(EA_COMPILER_MSVC)) && !defined(_CPPUNWIND)
+            #define EA_COMPILER_NO_UNWIND 1
+
+        #endif // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+    #endif // !defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_NO_UNWIND)
+
+
+    // ------------------------------------------------------------------------
+    // EA_DISABLE_ALL_VC_WARNINGS / EA_RESTORE_ALL_VC_WARNINGS
+    //
+    // Disable and re-enable all warning(s) within code.
+    //
+    // Example usage:
+    //     EA_DISABLE_ALL_VC_WARNINGS()
+    //     <code>
+    //     EA_RESTORE_ALL_VC_WARNINGS()
+    //
+    //This is duplicated from EABase's eacompilertraits.h
+    #ifndef EA_DISABLE_ALL_VC_WARNINGS
+        #if defined(_MSC_VER)
+            #define EA_DISABLE_ALL_VC_WARNINGS() \
+                __pragma(warning(push, 0)) \
+                __pragma(warning(disable: 4244 4265 4267 4350 4472 4509 4548 4623 4710 4985 6320 4755 4625 4626 4702 4668)) // Some warnings need to be explicitly called out.
+        #else
+            #define EA_DISABLE_ALL_VC_WARNINGS()
+        #endif
+    #endif
+
+    //This is duplicated from EABase's eacompilertraits.h
+    #ifndef EA_RESTORE_ALL_VC_WARNINGS
+        #if defined(_MSC_VER)
+            #define EA_RESTORE_ALL_VC_WARNINGS() \
+                __pragma(warning(pop))
+        #else
+            #define EA_RESTORE_ALL_VC_WARNINGS()
+        #endif
+    #endif
+
+    // Dinkumware
+    //This is duplicated from EABase's eahave.h
+    #if !defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !defined(EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY)
+        #if defined(__cplusplus)
+            EA_DISABLE_ALL_VC_WARNINGS()
+            #include <cstddef> // Need to trigger the compilation of yvals.h without directly using <yvals.h> because it might not exist.
+ EA_RESTORE_ALL_VC_WARNINGS() + #endif + + #if defined(__cplusplus) && defined(_CPPLIB_VER) /* If using the Dinkumware Standard library... */ + #define EA_HAVE_DINKUMWARE_CPP_LIBRARY 1 + #else + #define EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY 1 + #endif + #endif + + + // EA_COMPILER_NO_ALIGNED_NEW + // + // + #if !defined(EA_COMPILER_NO_ALIGNED_NEW) + #if defined(_HAS_ALIGNED_NEW) && _HAS_ALIGNED_NEW // VS2017 15.5 Preview + // supported. + #elif defined(EA_COMPILER_CPP17_ENABLED) + // supported. + #else + #define EA_COMPILER_NO_ALIGNED_NEW 1 + #endif + #endif + + // EA_COMPILER_NO_NEW_THROW_SPEC / EA_THROW_SPEC_NEW / EA_THROW_SPEC_DELETE + // + // If defined then the compiler's version of operator new is not decorated + // with a throw specification. This is useful for us to know because we + // often want to write our own overloaded operator new implementations. + // We need such operator new overrides to be declared identically to the + // way the compiler is defining operator new itself. + // + // Example usage: + // void* operator new(std::size_t) EA_THROW_SPEC_NEW(std::bad_alloc); + // void* operator new[](std::size_t) EA_THROW_SPEC_NEW(std::bad_alloc); + // void* operator new(std::size_t, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE(); + // void* operator new[](std::size_t, const std::nothrow_t&) EA_THROW_SPEC_NEW_NONE(); + // void operator delete(void*) EA_THROW_SPEC_DELETE_NONE(); + // void operator delete[](void*) EA_THROW_SPEC_DELETE_NONE(); + // void operator delete(void*, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE(); + // void operator delete[](void*, const std::nothrow_t&) EA_THROW_SPEC_DELETE_NONE(); + // + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) + #if defined(_MSC_VER) && (_MSC_VER >= 1912) // VS2017 15.3+ + #define EA_THROW_SPEC_NEW(x) noexcept(false) + #define EA_THROW_SPEC_NEW_NONE() noexcept + #define EA_THROW_SPEC_DELETE_NONE() noexcept + + #elif defined(_MSC_VER) && (_MSC_VER >= 1910) // VS2017+ + #define EA_THROW_SPEC_NEW(x) throw(x) + #define EA_THROW_SPEC_NEW_NONE() throw() + #define EA_THROW_SPEC_DELETE_NONE() throw() + + #else + #if defined(EA_PLATFORM_SONY) + #define EA_THROW_SPEC_NEW(X) _THROWS(X) + #elif defined(_MSC_VER) + // Disabled warning "nonstandard extension used: 'throw (...)'" as this warning is a W4 warning which is usually off by default + // and doesn't convey any important information but will still complain when building with /Wall (which most teams do) + #define EA_THROW_SPEC_NEW(X) __pragma(warning(push)) __pragma(warning(disable: 4987)) _THROWS(X) __pragma(warning(pop)) + #else + #define EA_THROW_SPEC_NEW(X) _THROW1(X) + #endif + #define EA_THROW_SPEC_NEW_NONE() _THROW0() + #define EA_THROW_SPEC_DELETE_NONE() _THROW0() + + #endif + #elif defined(EA_COMPILER_NO_EXCEPTIONS) && !defined(EA_COMPILER_RVCT) && !defined(EA_PLATFORM_LINUX) && !defined(EA_PLATFORM_APPLE) && !defined(CS_UNDEFINED_STRING) + #define EA_COMPILER_NO_NEW_THROW_SPEC 1 + + #define EA_THROW_SPEC_NEW(x) + #define EA_THROW_SPEC_NEW_NONE() + #define EA_THROW_SPEC_DELETE_NONE() + #else + #define EA_THROW_SPEC_NEW(x) throw(x) + #define EA_THROW_SPEC_NEW_NONE() throw() + #define EA_THROW_SPEC_DELETE_NONE() throw() + #endif + + + // EA_COMPILER_NO_STANDARD_CPP_LIBRARY + // + // If defined, then the compiler doesn't provide a Standard C++ library. + // + #if defined(EA_PLATFORM_ANDROID) + // Disabled because EA's eaconfig/android_config/android_sdk packages currently + // don't support linking STL libraries. 
Perhaps we can figure out what linker arguments
+        // are needed for an app so we can manually specify them and then re-enable this code.
+        //#include <android/api-level.h>
+        //
+        //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation.
+            #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+        //#endif
+    #endif
+
+
+    // EA_COMPILER_NO_STATIC_VARIABLE_INIT
+    //
+    // If defined, it means that global or static C++ variables will be
+    // constructed. Not all compiler/platform combinations support this.
+    // User code that needs to be portable must avoid having C++ variables
+    // that construct before main.
+    //
+    //#if defined(EA_PLATFORM_MOBILE)
+    //    #define EA_COMPILER_NO_STATIC_VARIABLE_INIT 1
+    //#endif
+
+
+    // EA_COMPILER_NO_STATIC_FUNCTION_INIT
+    //
+    // If defined, it means that functions marked as startup functions
+    // (e.g. __attribute__((constructor)) in GCC) are supported. It may
+    // be that some compiler/platform combinations don't support this.
+    //
+    //#if defined(XXX) // So far, all compiler/platforms we use support this.
+    //    #define EA_COMPILER_NO_STATIC_VARIABLE_INIT 1
+    //#endif
+
+    // EA_COMPILER_NO_VARIADIC_MACROS
+    //
+    // If defined, the compiler doesn't support C99/C++11 variadic macros.
+    // With a variadic macro, you can do this:
+    //     #define MY_PRINTF(format, ...) printf(format, __VA_ARGS__)
+    //
+    #if !defined(EA_COMPILER_NO_VARIADIC_MACROS)
+        #if defined(_MSC_VER) && (_MSC_VER < 1500) // If earlier than VS2008..
+            #define EA_COMPILER_NO_VARIADIC_MACROS 1
+        #elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__)) < 401 // If earlier than GCC 4.1..
+            #define EA_COMPILER_NO_VARIADIC_MACROS 1
+        #elif defined(EA_COMPILER_EDG) // Includes other compilers
+            // variadic macros are supported
+        #endif
+    #endif
+
+
+    // EA_COMPILER_NO_RVALUE_REFERENCES
+    //
+    // If defined, the compiler doesn't fully support C++11 rvalue reference semantics.
+    // This applies to the compiler only and not the Standard Library in use with the compiler,
+    // which is required by the Standard to have some support itself.
+    //
+    #if !defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+        #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_VER >= 1600)  // VS2010+
+            // supported.
+        #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403)  // EDG 4.3+.
+            // supported. Earlier EDG supported a subset of rvalue references. Implicit move constructors and assignment operators aren't supported until EDG 4.5.
+        #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_rvalue_references)
+            // supported.
+        #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005)  // GCC 4.5+
+            // supported.
+        #else
+            #define EA_COMPILER_NO_RVALUE_REFERENCES 1
+        #endif
+    #endif
+
+
+    // EA_COMPILER_NO_EXTERN_TEMPLATE
+    //
+    // If defined, the compiler doesn't support C++11 extern template.
+    // With extern templates, you can do this:
+    //     extern template void DoSomething(KnownType u);
+    //
+    #if !defined(EA_COMPILER_NO_EXTERN_TEMPLATE)
+        #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_VER >= 1700)  // VS2012+...
+            // Extern template is supported.
+        #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401)  // EDG 4.1+.
+            // supported.
+        #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && defined(__apple_build_version__) && (EA_COMPILER_VERSION >= 401)
+            // Extern template is supported.
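What this gates, sketched with a self-contained local template (Box is illustrative, not part of EASTL or EABase):

    template <typename T> struct Box { T value; };

    #if !defined(EA_COMPILER_NO_EXTERN_TEMPLATE)
        extern template struct Box<int>;   // declaration: suppresses implicit instantiation in this TU
    #endif

    template struct Box<int>;              // explicit instantiation definition, normally in exactly one .cpp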
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && !defined(__apple_build_version__) // Clang other than Apple's Clang
+ // Extern template is supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // Extern template is supported.
+ #else
+ #define EA_COMPILER_NO_EXTERN_TEMPLATE 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RANGE_BASED_FOR_LOOP
+ //
+ // If defined, the compiler doesn't support C++11 range-based for loops.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html
+ // You must #include <iterator> for range-based for loops to work.
+ // Example usage:
+ // #include <iterator>
+ // #include <vector>
+ // std::vector<float> floatVector;
+ // for(float& f : floatVector)
+ // f += 1.0;
+ //
+ #if !defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700)) // VS2012+...
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && (defined(__clang__) && (EA_COMPILER_VERSION >= 300)) // Clang 3.x+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && (defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006)) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RANGE_BASED_FOR_LOOP 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_CONSTEXPR
+ //
+ // Refers to C++11 constexpr (constant expression) declarations.
+ //
+ #if !defined(EA_COMPILER_NO_CONSTEXPR)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900)) // VS2015+... Not present in VC++ up to and including VS2013.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 406) // EDG 4.6+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_constexpr)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_CONSTEXPR 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_CONSTEXPR_IF
+ //
+ // Refers to C++17 constexpr if conditionals.
+ //
+ #if !defined(EA_COMPILER_NO_CONSTEXPR_IF)
+ #if defined(EA_COMPILER_CPP17_ENABLED) && (defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1911)) // VS2017 15.3+
+ // supported.
+ #elif defined(EA_COMPILER_CPP17_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 309) // Clang 3.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP17_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 7000) // GCC 7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_CONSTEXPR_IF 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_OVERRIDE
+ //
+ // Refers to the C++11 override specifier.
+ //
+ #ifndef EA_COMPILER_NO_OVERRIDE
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION > 1600) // VC++ > VS2010, even without C++11 support. VS2010 does support override, however it will generate warnings because the keyword is 'non-standard'.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
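+ // A minimal usage sketch (EA_OVERRIDE is the portable wrapper listed in
+ // eacompilertraits.h; the bare 'override' keyword assumes C++11 support):
+ // struct B { virtual void F(); };
+ // struct D : public B { void F() EA_OVERRIDE; };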
+ #else
+ #define EA_COMPILER_NO_OVERRIDE 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INHERITANCE_FINAL
+ //
+ // Refers to the C++11 final specifier.
+ //
+ #ifndef EA_COMPILER_NO_INHERITANCE_FINAL
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1500) // VS2008+, even without C++11 support.
+ // supported, though you need to use EA_INHERITANCE_FINAL for it to work with VS versions prior to 2012.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported
+ #else
+ #define EA_COMPILER_NO_INHERITANCE_FINAL 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_AUTO
+ //
+ // Refers to C++11 auto.
+ //
+ #if !defined(EA_COMPILER_NO_AUTO)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported with the exception of the usage of braced initializer lists as of EDG 4.3.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_AUTO 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NULLPTR
+ //
+ // Refers to C++11 nullptr (which is a built-in type). std::nullptr_t is defined in C++11 <cstddef>.
+ // Note that <EABase/nullptr.h> implements a portable nullptr implementation.
+ //
+ #if !defined(EA_COMPILER_NO_NULLPTR)
+ #if (defined(_MSC_VER) && (_MSC_VER >= 1600)) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(__clang__) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) && defined(EA_COMPILER_CPP11_ENABLED)
+ // supported
+ #else
+ #define EA_COMPILER_NO_NULLPTR 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_DECLTYPE
+ //
+ // Refers to C++11 decltype.
+ //
+ #if !defined(EA_COMPILER_NO_DECLTYPE)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_DECLTYPE 1
+ #endif
+ #endif
+
+
+
+ // EA_COMPILER_NO_DEFAULTED_FUNCTIONS
+ // EA_COMPILER_NO_DELETED_FUNCTIONS
+ //
+ // Refers to C++11 = default and = delete function declarations.
+ //
+ #if !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+
+ // supported, but as of VS2013 it isn't supported for defaulted move constructors and move assignment operators.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported, though defaulted move constructors and move assignment operators aren't supported until EDG 4.5.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) // Clang 3.0+, including Apple's Clang
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ // VC++ doesn't support it as of VS2012.
+ #define EA_COMPILER_NO_DEFAULTED_FUNCTIONS 1
+ #endif
+ #endif
+
+ #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ // VC++ doesn't support it as of VS2012.
+ #define EA_COMPILER_NO_DELETED_FUNCTIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_LAMBDA_EXPRESSIONS
+ //
+ // Refers to C++11 lambda expressions.
+ //
+ #if !defined(EA_COMPILER_NO_LAMBDA_EXPRESSIONS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported. However, converting lambdas to function pointers is not supported until EDG 4.5.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_LAMBDA_EXPRESSIONS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_TRAILING_RETURN_TYPES
+ //
+ // Refers to C++11 trailing-return-type. Also sometimes referred to as "incomplete return type".
+ //
+ #if !defined(EA_COMPILER_NO_TRAILING_RETURN_TYPES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported, though VS2010 doesn't support the spec completely as specified in the final standard.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported. However, use of "this" in trailing return types is not supported until EDG 4.4.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_TRAILING_RETURN_TYPES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_STRONGLY_TYPED_ENUMS
+ //
+ // Refers to C++11 strongly typed enums, which includes enum classes and sized enums. Doesn't include forward-declared enums.
+ //
+ #if !defined(EA_COMPILER_NO_STRONGLY_TYPED_ENUMS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+
+ // supported. A subset of this is actually supported by VS2010.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 400) // EDG 4.0+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_STRONGLY_TYPED_ENUMS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_FORWARD_DECLARED_ENUMS
+ //
+ // Refers to C++11 forward declared enums.
+ //
+ #if !defined(EA_COMPILER_NO_FORWARD_DECLARED_ENUMS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported. EDG 4.3 supports basic forward-declared enums, but not forward-declared strongly typed enums.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_FORWARD_DECLARED_ENUMS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_VARIADIC_TEMPLATES
+ //
+ // Refers to C++11 variadic templates.
+ //
+ #if !defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // EDG 4.3+.
+ // supported, though 4.1 has partial support for variadic templates.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported, though GCC 4.3 has partial support for variadic templates.
+ #else
+ #define EA_COMPILER_NO_VARIADIC_TEMPLATES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_TEMPLATE_ALIASES
+ //
+ // Refers to C++11 alias templates.
+ // Example alias template usage:
+ // template <typename T>
+ // using Dictionary = eastl::map<eastl::string, T>;
+ //
+ // Dictionary<int> StringIntDictionary;
+ //
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_TEMPLATE_ALIASES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_VARIABLE_TEMPLATES
+ //
+ // Refers to C++14 variable templates.
+ // Example variable template usage:
+ // template <class T>
+ // constexpr T pi = T(3.1415926535897932385);
+ //
+ #if !defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)
+ #if defined(_MSC_VER) && (_MSC_FULL_VER >= 190023918) // VS2015 Update 2 and above.
+ // supported.
+ #elif defined(EA_COMPILER_CPP14_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 304) && !defined(__apple_build_version__) // Clang 3.4+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP14_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 5000) // GCC 5+
+ // supported.
+ #elif !defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_COMPILER_NO_VARIABLE_TEMPLATES 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_INLINE_VARIABLES
+ //
+ // Refers to C++17 inline variables that allow the definition of variables in header files.
+ //
+ // Example usage:
+ // struct Foo
+ // {
+ // static inline constexpr int kConstant = 42; // no out of class definition
+ // };
+ //
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4424.pdf
+ // http://en.cppreference.com/w/cpp/language/inline
+ //
+ #if !defined(EA_COMPILER_NO_INLINE_VARIABLES)
+ #define EA_COMPILER_NO_INLINE_VARIABLES 1
+ #endif
+
+
+ // EA_COMPILER_NO_INITIALIZER_LISTS
+ //
+ // Refers to C++11 initializer lists.
+ // This refers to the compiler support for this and not the Standard Library support (std::initializer_list).
+ //
+ #if !defined(EA_COMPILER_NO_INITIALIZER_LISTS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__)
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+
+ // supported, though GCC 4.3 has partial support for it.
+ #else
+ #define EA_COMPILER_NO_INITIALIZER_LISTS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NORETURN
+ //
+ // Refers to C++11 declaration attribute: noreturn.
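+ // A usage sketch, assuming the EA_NORETURN wrapper listed in eacompilertraits.h
+ // (the raw attribute spelling requires C++11 attribute support):
+ // EA_NORETURN void FatalError(const char* pMessage); // portable form
+ // [[noreturn]] void FatalError(const char* pMessage); // raw C++11 attribute form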
+ // http://en.cppreference.com/w/cpp/language/attributes + // http://blog.aaronballman.com/2011/09/understanding-attributes/ + // + #if !defined(EA_COMPILER_NO_NORETURN) + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300) // VS2003+ + // supported via __declspec(noreturn). You need to use that or EA_NORETURN. VC++ up to VS2013 doesn't support any C++11 attribute types. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+ + // supported. + #else + #define EA_COMPILER_NO_NORETURN 1 + #endif + #endif + + + // EA_COMPILER_NO_CARRIES_DEPENDENCY + // + // Refers to C++11 declaration attribute: carries_dependency. + // http://en.cppreference.com/w/cpp/language/attributes + // http://blog.aaronballman.com/2011/09/understanding-attributes/ + // + #if !defined(EA_COMPILER_NO_CARRIES_DEPENDENCY) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+. + // supported; stricter than other compilers in its usage. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + // Currently GNUC doesn't appear to support this attribute. + //#elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+ + // // supported. + #else + #define EA_COMPILER_NO_CARRIES_DEPENDENCY 1 + #endif + #endif + + + // EA_COMPILER_NO_FALLTHROUGH + // + // Refers to C++17 declaration attribute: fallthrough. + // http://en.cppreference.com/w/cpp/language/attributes + // + #if !defined(EA_COMPILER_NO_FALLTHROUGH) + #if defined(EA_COMPILER_CPP17_ENABLED) + // supported. + #else + #define EA_COMPILER_NO_FALLTHROUGH 1 + #endif + #endif + + + // EA_COMPILER_NO_NODISCARD + // + // Refers to C++17 declaration attribute: nodiscard. + // http://en.cppreference.com/w/cpp/language/attributes + // + #if !defined(EA_COMPILER_NO_NODISCARD) + #if defined(EA_COMPILER_CPP17_ENABLED) + // supported. + #else + #define EA_COMPILER_NO_NODISCARD 1 + #endif + #endif + + + // EA_COMPILER_NO_MAYBE_UNUSED + // + // Refers to C++17 declaration attribute: maybe_unused. + // http://en.cppreference.com/w/cpp/language/attributes + // + #if !defined(EA_COMPILER_NO_MAYBE_UNUSED) + #if defined(EA_COMPILER_CPP17_ENABLED) + // supported. + #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1912) // VS2017 15.3+ + // supported. + #else + #define EA_COMPILER_NO_MAYBE_UNUSED 1 + #endif + #endif + + + // EA_COMPILER_NO_STRUCTURED_BINDING + // + // Indicates if target compiler supports the C++17 "structured binding" language feature. + // https://en.cppreference.com/w/cpp/language/structured_binding + // + // + #if !defined(EA_COMPILER_NO_STRUCTURED_BINDING) + #if defined(EA_COMPILER_CPP17_ENABLED) + // supported. 
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1912) // VS2017 15.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_STRUCTURED_BINDING 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_DESIGNATED_INITIALIZERS
+ //
+ // Indicates the target compiler supports the C++20 "designated initializer" language feature.
+ // https://en.cppreference.com/w/cpp/language/aggregate_initialization
+ //
+ // Example:
+ // struct A { int x; int y; };
+ // A a = { .y = 42, .x = 1 };
+ //
+ #if !defined(EA_COMPILER_NO_DESIGNATED_INITIALIZERS)
+ #if defined(EA_COMPILER_CPP20_ENABLED)
+ // supported.
+ #else
+ #define EA_COMPILER_NO_DESIGNATED_INITIALIZERS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS
+ //
+ // Refers to C++11 non-static data member initializers.
+ // http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm
+ //
+ #if !defined(EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_NONSTATIC_MEMBER_INITIALIZERS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS
+ //
+ // Defines if the compiler supports >> (as opposed to > >) in template
+ // declarations such as typedef eastl::list<eastl::list<int>> ListList;
+ //
+ #if !defined(EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS)
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_RIGHT_ANGLE_BRACKETS 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_ALIGNOF
+ //
+ // Refers specifically to C++11 alignof and not old compiler extensions such as __alignof__().
+ // However, EABase provides a portable EA_ALIGN_OF which works for all compilers.
+ //
+ #if !defined(EA_COMPILER_NO_ALIGNOF)
+ // Not supported by VC++ as of VS2013, though EA_ALIGN_OF is supported on all compilers as an alternative.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang.
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+
+ // supported.
+ #else
+ #define EA_COMPILER_NO_ALIGNOF 1
+ #endif
+ #endif
+
+
+ // EA_COMPILER_NO_ALIGNAS
+ //
+ // Refers to C++11 alignas.
+ //
+ #if !defined(EA_COMPILER_NO_ALIGNAS)
+ // Not supported by VC++ as of VS2013.
+ #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+
+ // supported.
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+ + // supported. + #else + #define EA_COMPILER_NO_ALIGNAS 1 + #endif + #endif + + + // EA_COMPILER_NO_DELEGATING_CONSTRUCTORS + // + // Refers to C++11 constructor delegation. + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf + // https://www.ibm.com/developerworks/mydeveloperworks/blogs/5894415f-be62-4bc0-81c5-3956e82276f3/entry/c_0x_delegating_constructors + // + #if !defined(EA_COMPILER_NO_DELEGATING_CONSTRUCTORS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+ + // supported. + #else + #define EA_COMPILER_NO_DELEGATING_CONSTRUCTORS 1 + #endif + #endif + + + // EA_COMPILER_NO_INHERITING_CONSTRUCTORS + // + // Refers to C++11 constructor inheritance via 'using'. + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm + // + #if !defined(EA_COMPILER_NO_INHERITING_CONSTRUCTORS) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_inheriting_constructors) // Clang + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+ + // supported. + #else + #define EA_COMPILER_NO_INHERITING_CONSTRUCTORS 1 + #endif + #endif + + + // EA_COMPILER_NO_USER_DEFINED_LITERALS + // + // http://en.cppreference.com/w/cpp/language/user_literal + // http://stackoverflow.com/questions/237804/what-new-capabilities-do-user-defined-literals-add-to-c + // + #if !defined(EA_COMPILER_NO_USER_DEFINED_LITERALS) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+ + // supported. + #else + #define EA_COMPILER_NO_USER_DEFINED_LITERALS 1 + #endif + #endif + + + // EA_COMPILER_NO_STANDARD_LAYOUT_TYPES + // a.k.a. POD relaxation + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm + // + #if !defined(EA_COMPILER_NO_STANDARD_LAYOUT_TYPES) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1700) // VS2012+ + // supported. 
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. + #else + #define EA_COMPILER_NO_STANDARD_LAYOUT_TYPES 1 + #endif + #endif + + + // EA_COMPILER_NO_EXTENDED_SIZEOF + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html + // Allows you to do this: sizeof(SomeClass::mSomeMember) + // + #if !defined(EA_COMPILER_NO_EXTENDED_SIZEOF) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + // Versions of EDG prior to 4.5 only support extended sizeof in non-member functions. Full support was added in 4.5 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. + #else + #define EA_COMPILER_NO_EXTENDED_SIZEOF 1 + #endif + #endif + + + // EA_COMPILER_NO_INLINE_NAMESPACES + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm + // http://blog.aaronballman.com/2011/07/inline-namespaces/ + // + #if !defined(EA_COMPILER_NO_INLINE_NAMESPACES) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+ + // supported. + #else + #define EA_COMPILER_NO_INLINE_NAMESPACES 1 + #endif + #endif + + + // EA_COMPILER_NO_UNRESTRICTED_UNIONS + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf + // + #if !defined(EA_COMPILER_NO_UNRESTRICTED_UNIONS) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 406) // EDG 4.6+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+ + // supported. 
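+ // As an illustration, with unrestricted unions a member with a non-trivial
+ // constructor becomes legal; the user then manages its lifetime manually:
+ // union Variant { int i; eastl::string s; Variant() : i(0) {} ~Variant() {} };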
+ #else + #define EA_COMPILER_NO_UNRESTRICTED_UNIONS 1 + #endif + #endif + + + // EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS + // + // http://en.wikipedia.org/wiki/C%2B%2B11#Explicit_conversion_operators + // + #if !defined(EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (_MSC_FULL_VER == 170051025) // VS2012 November Preview for Windows only. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) // EDG 4.4+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. + #else + #define EA_COMPILER_NO_EXPLICIT_CONVERSION_OPERATORS 1 + #endif + #endif + + + // EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS + // + // The compiler does not support default template arguments for function templates. + // http://stackoverflow.com/questions/2447458/default-template-arguments-for-function-templates + // + #if !defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // EDG 4.4+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4003) // GCC 4.3+ + // supported. + #else + #define EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS 1 + #endif + #endif + + + // EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm + // http://stackoverflow.com/questions/5751977/local-type-as-template-arguments-in-c + // + #if !defined(EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+ + // supported. + #if (EA_COMPILER_VERSION < 1700) // VS2010 generates a warning, but the C++ language now allows it. + #pragma warning(disable: 4836) // nonstandard extension used: local types or unnamed types cannot be used as template arguments. + #endif + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 402) // EDG 4.2+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. 
+ #else + #define EA_COMPILER_NO_LOCAL_CLASS_TEMPLATE_PARAMETERS 1 + #endif + #endif + + + // EA_COMPILER_NO_NOEXCEPT + // + // C++11 noexcept + // http://en.cppreference.com/w/cpp/language/attributes + // http://en.cppreference.com/w/cpp/language/noexcept + // + #if !defined(EA_COMPILER_NO_NOEXCEPT) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900) // VS2014+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 405) // EDG 4.5+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4006) // GCC 4.6+ + // supported. + #else + #define EA_COMPILER_NO_NOEXCEPT 1 + #endif + #endif + + + // EA_COMPILER_NO_RAW_LITERALS + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + // http://en.wikipedia.org/wiki/C%2B%2B11#New_string_literals + // + #if !defined(EA_COMPILER_NO_RAW_LITERALS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. + #else + #define EA_COMPILER_NO_RAW_LITERALS 1 + #endif + #endif + + + // EA_COMPILER_NO_UNICODE_STRING_LITERALS + // + // http://en.wikipedia.org/wiki/C%2B%2B11#New_string_literals + // + #if !defined(EA_COMPILER_NO_UNICODE_STRING_LITERALS) + // Not supported by VC++ as of VS2013. + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+. + // supported. It's not clear if it's v4.4 or v4.7 that adds this support. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 300) && !defined(__apple_build_version__) // Clang 3.0+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 407) // EDG 4.7+. + // supported. It's not clear if it's v4.4 or v4.7 that adds this support. + #else + #define EA_COMPILER_NO_UNICODE_STRING_LITERALS 1 + #endif + #endif + + + // EA_COMPILER_NO_NEW_CHARACTER_TYPES + // + // Refers to char16_t and char32_t as true native types (and not something simply typedef'd from uint16_t and uint32_t). 
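+ // For example, when char16_t is a true native type the following declares two
+ // distinct overloads, which a uint16_t-based typedef could not provide:
+ // void Print(char16_t c);
+ // void Print(uint16_t n);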
+ // http://en.cppreference.com/w/cpp/language/types + // + #if !defined(EA_COMPILER_NO_NEW_CHARACTER_TYPES) + #if defined(EA_COMPILER_NO_UNICODE_STRING_LITERALS) // Some compilers have had support for char16_t prior to support for u"", but it's not useful to have the former without the latter. + #define EA_COMPILER_NO_NEW_CHARACTER_TYPES 1 + #endif + #endif + + + // EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS + // + // C++ 11 relaxed \u\U sequences in strings. + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html + // + #if !defined(EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS) + // VC++ up till at least VS2013 supports \u and \U but supports them wrong with respect to the C++11 Standard. + + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4005) // GCC 4.5+ + // supported. + #else + #define EA_COMPILER_NO_UNICODE_CHAR_NAME_LITERALS 1 + #endif + #endif + + + // EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX + // + // http://en.wikipedia.org/wiki/C%2B%2B11#Uniform_initialization + // + #if !defined(EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1800) // VS2013+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 401) && defined(__apple_build_version__) // Apple clang 4.1+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 301) && !defined(__apple_build_version__) // Clang 3.1+, not including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4004) // GCC 4.4+ + // supported. + #else + #define EA_COMPILER_NO_UNIFIED_INITIALIZATION_SYNTAX 1 + #endif + #endif + + + // EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS + // + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf + // + #if !defined(EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1600) // VS2010+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) // EDG 4.1+. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && (EA_COMPILER_VERSION >= 209) // Clang 2.9+, including Apple's Clang. + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4007) // GCC 4.7+ + // supported. + #else + #define EA_COMPILER_NO_EXTENDED_FRIEND_DECLARATIONS 1 + #endif + #endif + + + // EA_COMPILER_NO_THREAD_LOCAL + // + // Refers specifically to C++ thread_local, which is like compiler __thread implementations except + // that it also supports non-trivial classes (e.g. with ctors). EA_COMPILER_NO_THREAD_LOCAL refers + // specifically to full C++11 thread_local support. The EAThread package provides a wrapper for + // __thread via EA_THREAD_LOCAL (which unfortunately sounds like C++ thread_local). 
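+ // A sketch of the distinction (type name hypothetical): full C++11 thread_local
+ // runs constructors once per thread, which plain __thread cannot do:
+ // thread_local SomeClassWithCtor gPerThreadObject; // needs full thread_local support
+ // //__thread SomeClassWithCtor gPerThreadObject; // typically rejected: non-trivial ctor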
+ // + // https://en.cppreference.com/w/cpp/keyword/thread_local + // + #if !defined(EA_COMPILER_NO_THREAD_LOCAL) + #if defined(EA_COMPILER_CPP11_ENABLED) && defined(__clang__) && EA_COMPILER_HAS_FEATURE(cxx_thread_local) + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(_MSC_VER) && (EA_COMPILER_VERSION >= 1900) // VS2015+ + // supported. + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(__GNUC__) && (EA_COMPILER_VERSION >= 4008) // GCC 4.8+ + // supported. + #else + #define EA_COMPILER_NO_THREAD_LOCAL 1 + #endif + #endif + + +#endif // INCLUDED_eacompiler_H + + + + + diff --git a/external/EASTL/include/EABase/config/eacompilertraits.h b/external/EASTL/include/EABase/config/eacompilertraits.h new file mode 100644 index 00000000..20873748 --- /dev/null +++ b/external/EASTL/include/EABase/config/eacompilertraits.h @@ -0,0 +1,2590 @@ +/*----------------------------------------------------------------------------- + * config/eacompilertraits.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. + *----------------------------------------------------------------------------- + * Currently supported defines include: + * EA_PREPROCESSOR_JOIN + * + * EA_COMPILER_IS_ANSIC + * EA_COMPILER_IS_C99 + * EA_COMPILER_IS_C11 + * EA_COMPILER_HAS_C99_TYPES + * EA_COMPILER_IS_CPLUSPLUS + * EA_COMPILER_MANAGED_CPP + * EA_COMPILER_INTMAX_SIZE + * EA_OFFSETOF + * EA_SIZEOF_MEMBER + * + * EA_ALIGN_OF() + * EA_ALIGN_MAX_STATIC / EA_ALIGN_MAX_AUTOMATIC + * EA_ALIGN() / EA_PREFIX_ALIGN() / EA_POSTFIX_ALIGN() + * EA_ALIGNED() + * EA_PACKED() + * + * EA_LIKELY() + * EA_UNLIKELY() + * EA_INIT_PRIORITY() + * EA_MAY_ALIAS() + * EA_ASSUME() + * EA_ANALYSIS_ASSUME() + * EA_PURE + * EA_WEAK + * EA_UNUSED() + * EA_EMPTY() + * + * EA_WCHAR_T_NON_NATIVE + * EA_WCHAR_SIZE = + * + * EA_RESTRICT + * EA_DEPRECATED / EA_PREFIX_DEPRECATED / EA_POSTFIX_DEPRECATED + * EA_FORCE_INLINE / EA_PREFIX_FORCE_INLINE / EA_POSTFIX_FORCE_INLINE + * EA_NO_INLINE / EA_PREFIX_NO_INLINE / EA_POSTFIX_NO_INLINE + * EA_NO_VTABLE / EA_CLASS_NO_VTABLE / EA_STRUCT_NO_VTABLE + * EA_PASCAL + * EA_PASCAL_FUNC() + * EA_SSE = [0 | 1] + * EA_IMPORT + * EA_EXPORT + * EA_PRAGMA_ONCE_SUPPORTED + * EA_ONCE + * EA_OVERRIDE + * EA_INHERITANCE_FINAL + * EA_SEALED + * EA_ABSTRACT + * EA_CONSTEXPR / EA_CONSTEXPR_OR_CONST + * EA_CONSTEXPR_IF + * EA_EXTERN_TEMPLATE + * EA_NOEXCEPT + * EA_NORETURN + * EA_CARRIES_DEPENDENCY + * EA_NON_COPYABLE / struct EANonCopyable + * EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON + * EA_SIGNED_RIGHT_SHIFT_IS_UNSIGNED + * + * EA_DISABLE_VC_WARNING / EA_RESTORE_VC_WARNING / EA_DISABLE_ALL_VC_WARNINGS / EA_RESTORE_ALL_VC_WARNINGS + * EA_DISABLE_GCC_WARNING / EA_RESTORE_GCC_WARNING + * EA_DISABLE_CLANG_WARNING / EA_RESTORE_CLANG_WARNING + * EA_DISABLE_SN_WARNING / EA_RESTORE_SN_WARNING / EA_DISABLE_ALL_SN_WARNINGS / EA_RESTORE_ALL_SN_WARNINGS + * EA_DISABLE_GHS_WARNING / EA_RESTORE_GHS_WARNING + * EA_DISABLE_EDG_WARNING / EA_RESTORE_EDG_WARNING + * EA_DISABLE_CW_WARNING / EA_RESTORE_CW_WARNING + * + * EA_DISABLE_DEFAULT_CTOR + * EA_DISABLE_COPY_CTOR + * EA_DISABLE_MOVE_CTOR + * EA_DISABLE_ASSIGNMENT_OPERATOR + * EA_DISABLE_MOVE_OPERATOR + * + * Todo: + * Find a way to reliably detect wchar_t size at preprocessor time and + * implement it below for EA_WCHAR_SIZE. + * + * Todo: + * Find out how to support EA_PASCAL and EA_PASCAL_FUNC for systems in + * which it hasn't yet been found out for. 
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eacompilertraits_H
+#define INCLUDED_eacompilertraits_H
+
+ #include <EABase/config/eaplatform.h>
+ #include <EABase/config/eacompiler.h>
+
+
+ // Metrowerks uses #defines in its core C header files to define
+ // the kind of information we need below (e.g. C99 compatibility)
+
+
+
+ // Determine if this compiler is ANSI C compliant and if it is C99 compliant.
+ #if defined(__STDC__)
+ #define EA_COMPILER_IS_ANSIC 1 // The compiler claims to be ANSI C
+
+ // Is the compiler a C99 compiler or equivalent?
+ // From ISO/IEC 9899:1999:
+ // 6.10.8 Predefined macro names
+ // __STDC_VERSION__ The integer constant 199901L. (150)
+ //
+ // 150) This macro was not specified in ISO/IEC 9899:1990 and was
+ // specified as 199409L in ISO/IEC 9899/AMD1:1995. The intention
+ // is that this will remain an integer constant of type long int
+ // that is increased with each revision of this International Standard.
+ //
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+ #define EA_COMPILER_IS_C99 1
+ #endif
+
+ // Is the compiler a C11 compiler?
+ // From ISO/IEC 9899:2011:
+ // Page 176, 6.10.8.1 (Predefined macro names) :
+ // __STDC_VERSION__ The integer constant 201112L. (178)
+ //
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+ #define EA_COMPILER_IS_C11 1
+ #endif
+ #endif
+
+ // Some compilers (e.g. GCC) define __USE_ISOC99 if they are not
+ // strictly C99 compilers (or are simply C++ compilers) but are set
+ // to use C99 functionality. Metrowerks defines _MSL_C99 as 1 in
+ // this case, but 0 otherwise.
+ #if (defined(__USE_ISOC99) || (defined(_MSL_C99) && (_MSL_C99 == 1))) && !defined(EA_COMPILER_IS_C99)
+ #define EA_COMPILER_IS_C99 1
+ #endif
+
+ // Metrowerks defines C99 types (e.g. intptr_t) intrinsically when in C99 mode (-lang C99 on the command line).
+ #if (defined(_MSL_C99) && (_MSL_C99 == 1))
+ #define EA_COMPILER_HAS_C99_TYPES 1
+ #endif
+
+ #if defined(__GNUC__)
+ #if (((__GNUC__ * 100) + __GNUC_MINOR__) >= 302) // Also, GCC defines _HAS_C9X.
+ #define EA_COMPILER_HAS_C99_TYPES 1 // The compiler is not necessarily a C99 compiler, but it defines C99 types.
+
+ #ifndef __STDC_LIMIT_MACROS
+ #define __STDC_LIMIT_MACROS 1
+ #endif
+
+ #ifndef __STDC_CONSTANT_MACROS
+ #define __STDC_CONSTANT_MACROS 1 // This tells the GCC compiler that we want it to use its native C99 types.
+ #endif
+ #endif
+ #endif
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600)
+ #define EA_COMPILER_HAS_C99_TYPES 1
+ #endif
+
+ #ifdef __cplusplus
+ #define EA_COMPILER_IS_CPLUSPLUS 1
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PREPROCESSOR_JOIN
+ //
+ // This macro joins the two arguments together, even when one of
+ // the arguments is itself a macro (see 16.3.1 in C++98 standard).
+ // This is often used to create a unique name with __LINE__.
+ //
+ // For example, this declaration:
+ // char EA_PREPROCESSOR_JOIN(unique_, __LINE__);
+ // expands to this:
+ // char unique_73;
+ //
+ // Note that all versions of MSVC++ up to at least version 7.1
+ // fail to properly compile macros that use __LINE__ in them
+ // when the "program database for edit and continue" option
+ // is enabled. The result is that __LINE__ gets converted to
+ // something like __LINE__(Var+37).
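+ //
+ // The extra level of indirection below (JOIN -> JOIN1 -> JOIN2) forces
+ // arguments such as __LINE__ to be macro-expanded before the ## paste takes
+ // place; a single-level a##b would paste the literal token "__LINE__"
+ // instead of its numeric value.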
+ //
+ #ifndef EA_PREPROCESSOR_JOIN
+ #define EA_PREPROCESSOR_JOIN(a, b) EA_PREPROCESSOR_JOIN1(a, b)
+ #define EA_PREPROCESSOR_JOIN1(a, b) EA_PREPROCESSOR_JOIN2(a, b)
+ #define EA_PREPROCESSOR_JOIN2(a, b) a##b
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_STRINGIFY
+ //
+ // Example usage:
+ // printf("Line: %s", EA_STRINGIFY(__LINE__));
+ //
+ #ifndef EA_STRINGIFY
+ #define EA_STRINGIFY(x) EA_STRINGIFYIMPL(x)
+ #define EA_STRINGIFYIMPL(x) #x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_IDENTITY
+ //
+ #ifndef EA_IDENTITY
+ #define EA_IDENTITY(x) x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_COMPILER_MANAGED_CPP
+ // Defined if this is being compiled with Managed C++ extensions
+ #ifdef EA_COMPILER_MSVC
+ #if EA_COMPILER_VERSION >= 1300
+ #ifdef _MANAGED
+ #define EA_COMPILER_MANAGED_CPP 1
+ #endif
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_COMPILER_INTMAX_SIZE
+ //
+ // This is related to the concept of intmax_t and uintmax_t, but is available
+ // in preprocessor form as opposed to compile-time form. At compile-time
+ // you can use intmax_t and uintmax_t to use the actual types.
+ //
+ #if defined(__GNUC__) && defined(__x86_64__)
+ #define EA_COMPILER_INTMAX_SIZE 16 // intmax_t is __int128_t (GCC extension) and is 16 bytes.
+ #else
+ #define EA_COMPILER_INTMAX_SIZE 8 // intmax_t is int64_t and is 8 bytes.
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_LPAREN / EA_RPAREN / EA_COMMA / EA_SEMI
+ //
+ // These are used for using special characters in macro-using expressions.
+ // Note that this macro intentionally uses (), as in some cases it can't
+ // work unless it does.
+ //
+ // Example usage:
+ // int x = SOME_MACRO(SomeTemplate<int EA_COMMA() int>);
+ //
+ #ifndef EA_LPAREN
+ #define EA_LPAREN() (
+ #endif
+ #ifndef EA_RPAREN
+ #define EA_RPAREN() )
+ #endif
+ #ifndef EA_COMMA
+ #define EA_COMMA() ,
+ #endif
+ #ifndef EA_SEMI
+ #define EA_SEMI() ;
+ #endif
+
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_OFFSETOF
+ // Implements a portable version of the non-standard offsetof macro.
+ //
+ // The offsetof macro is guaranteed to only work with POD types. However, we wish to use
+ // it for non-POD types but where we know that offsetof will still work for the cases
+ // in which we use it. GCC unilaterally gives a warning when using offsetof with a non-POD,
+ // even if the given usage happens to work. So we make a workaround version of offsetof
+ // here for GCC which has the same effect but tricks the compiler into not issuing the warning.
+ // The 65536 does the compiler fooling; the reinterpret_cast prevents the possibility of
+ // an overloaded operator& for the class getting in the way.
+ //
+ // Example usage:
+ // struct A{ int x; int y; };
+ // size_t n = EA_OFFSETOF(A, y);
+ //
+ #if defined(__GNUC__) // We can't use GCC 4's __builtin_offsetof because it mistakenly complains about non-PODs that are really PODs.
+ #define EA_OFFSETOF(struct_, member_) ((size_t)(((uintptr_t)&reinterpret_cast<const volatile char&>((((struct_*)65536)->member_))) - 65536))
+ #else
+ #define EA_OFFSETOF(struct_, member_) offsetof(struct_, member_)
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_SIZEOF_MEMBER
+ // Implements a portable way to determine the size of a member.
+ //
+ // The EA_SIZEOF_MEMBER simply returns the size of a member within a class or struct; member
+ // access rules still apply. We offer two approaches depending on the compiler's support for
+ // extended sizeof (see EA_COMPILER_NO_EXTENDED_SIZEOF), although most C++11 compilers support it.
+ //
+ // Example usage:
+ // struct A{ int x; int y; };
+ // size_t n = EA_SIZEOF_MEMBER(A, y);
+ //
+ #ifndef EA_COMPILER_NO_EXTENDED_SIZEOF
+ #define EA_SIZEOF_MEMBER(struct_, member_) (sizeof(struct_::member_))
+ #else
+ #define EA_SIZEOF_MEMBER(struct_, member_) (sizeof(((struct_*)0)->member_))
+ #endif
+
+ // ------------------------------------------------------------------------
+ // alignment expressions
+ //
+ // Here we define
+ // EA_ALIGN_OF(type) // Returns size_t.
+ // EA_ALIGN_MAX_STATIC // The max align value that the compiler will respect for EA_ALIGN for static data (global and static variables). Some compilers allow high values, some allow no more than 8. EA_ALIGN_MIN is assumed to be 1.
+ // EA_ALIGN_MAX_AUTOMATIC // The max align value for automatic variables (variables declared as local to a function).
+ // EA_ALIGN(n) // Used as a prefix. n is byte alignment, with n being a power of two. Most of the time you can use this and avoid using EA_PREFIX_ALIGN/EA_POSTFIX_ALIGN.
+ // EA_ALIGNED(t, v, n) // Type, variable, alignment. Used to align an instance. You should need this only for unusual compilers.
+ // EA_PACKED // Specifies that the given structure be packed (and not have its members aligned).
+ //
+ // Also we define the following for the rare cases where it's needed.
+ // EA_PREFIX_ALIGN(n) // n is byte alignment, with n being a power of two. You should need this only for unusual compilers.
+ // EA_POSTFIX_ALIGN(n) // Valid values for n are 1, 2, 4, 8, etc. You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // size_t x = EA_ALIGN_OF(int); Non-aligned equivalents. Meaning
+ // EA_PREFIX_ALIGN(8) int x = 5; int x = 5; Align x on 8 for compilers that require prefix attributes. Can just use EA_ALIGN instead.
+ // EA_ALIGN(8) int x; int x; Align x on 8 for compilers that allow prefix attributes.
+ // int x EA_POSTFIX_ALIGN(8); int x; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8) = 5; int x = 5; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8)(5); int x(5); Align x on 8 for compilers that require postfix attributes.
+ // struct EA_PREFIX_ALIGN(8) X { int x; } EA_POSTFIX_ALIGN(8); struct X { int x; }; Define X as a struct which is aligned on 8 when used.
+ // EA_ALIGNED(int, x, 8) = 5; int x = 5; Align x on 8.
+ // EA_ALIGNED(int, x, 16)(5); int x(5); Align x on 16.
+ // EA_ALIGNED(int, x[3], 16); int x[3]; Align x array on 16.
+ // EA_ALIGNED(int, x[3], 16) = { 1, 2, 3 }; int x[3] = { 1, 2, 3 }; Align x array on 16.
+ // int x[3] EA_PACKED; int x[3]; Pack the 3 ints of the x array. GCC doesn't seem to support packing of int arrays.
+ // struct EA_ALIGN(32) X { int x; int y; }; struct X { int x; }; Define X as a struct which is aligned on 32 when used.
+ // EA_ALIGN(32) struct X { int x; int y; } Z; struct X { int x; } Z; Define X as a struct, and align the instance Z on 32.
+ // struct X { int x EA_PACKED; int y EA_PACKED; }; struct X { int x; int y; }; Pack the x and y members of struct X.
+ // struct X { int x; int y; } EA_PACKED; struct X { int x; int y; }; Pack the members of struct X.
+ // typedef EA_ALIGNED(int, int16, 16); int16 n16; typedef int int16; int16 n16; Define int16 as an int which is aligned on 16.
+ // typedef EA_ALIGNED(X, X16, 16); X16 x16; typedef X X16; X16 x16; Define X16 as an X which is aligned on 16.
+
+ #if !defined(EA_ALIGN_MAX) // If the user hasn't globally set an alternative value...
+ #if defined(EA_PROCESSOR_ARM) // ARM compilers in general tend to limit automatic variables to 8 or less.
+ #define EA_ALIGN_MAX_STATIC 1048576
+ #define EA_ALIGN_MAX_AUTOMATIC 1 // Typically they support only built-in natural alignment types (both arm-eabi and apple-abi).
+ #elif defined(EA_PLATFORM_APPLE)
+ #define EA_ALIGN_MAX_STATIC 1048576
+ #define EA_ALIGN_MAX_AUTOMATIC 16
+ #else
+ #define EA_ALIGN_MAX_STATIC 1048576 // Arbitrarily high value. What is the actual max?
+ #define EA_ALIGN_MAX_AUTOMATIC 1048576
+ #endif
+ #endif
+
+ // EDG intends to be compatible with GCC but has a bug whereby it
+ // fails to support calling a constructor in an aligned declaration when
+ // using postfix alignment attributes. Prefix works for alignment, but does not align
+ // the size like postfix does. Prefix also fails on templates. So GCC-style postfix
+ // is still used, but the user will need to use EA_POSTFIX_ALIGN before the constructor parameters.
+ #if defined(__GNUC__) && (__GNUC__ < 3)
+ #define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+ #define EA_ALIGN(n)
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+ #define EA_PACKED __attribute__((packed))
+
+ // GCC 3.x+, IBM, and clang support prefix attributes.
+ #elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__xlC__) || defined(__clang__)
+ #define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+ #define EA_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+ #define EA_PACKED __attribute__((packed))
+
+ // Metrowerks supports prefix attributes.
+ // Metrowerks does not support packed alignment attributes.
+ #elif defined(EA_COMPILER_INTEL) || defined(CS_UNDEFINED_STRING) || (defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300))
+ #define EA_ALIGN_OF(type) ((size_t)__alignof(type))
+ #define EA_ALIGN(n) __declspec(align(n))
+ #define EA_PREFIX_ALIGN(n) EA_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) EA_ALIGN(n) variable_type variable
+ #define EA_PACKED // See EA_PRAGMA_PACK_VC for an alternative.
+
+ // Arm brand compiler
+ #elif defined(EA_COMPILER_ARM)
+ #define EA_ALIGN_OF(type) ((size_t)__ALIGNOF__(type))
+ #define EA_ALIGN(n) __align(n)
+ #define EA_PREFIX_ALIGN(n) __align(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) __align(n) variable_type variable
+ #define EA_PACKED __packed
+
+ #else // Unusual compilers
+ // There is nothing we can do about some of these. This is not as bad a problem as it seems.
+ // If the given platform/compiler doesn't support alignment specifications, then it's somewhat
+ // likely that alignment doesn't matter for that platform. Otherwise they would have defined
+ // functionality to manipulate alignment.
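+ // Note: on these compilers the alignment macros below intentionally expand to
+ // nothing, and EA_ALIGN_OF falls back to the portable approximations that follow.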
+ #define EA_ALIGN(n)
+ #define EA_PREFIX_ALIGN(n)
+ #define EA_POSTFIX_ALIGN(n)
+ #define EA_ALIGNED(variable_type, variable, n) variable_type variable
+ #define EA_PACKED
+
+ #ifdef __cplusplus
+ template <typename T> struct EAAlignOf1 { enum { s = sizeof (T), value = s ^ (s & (s - 1)) }; };
+ template <typename T> struct EAAlignOf2;
+ template <int size_diff> struct helper { template <typename T> struct Val { enum { value = size_diff }; }; };
+ template <> struct helper<0> { template <typename T> struct Val { enum { value = EAAlignOf2<T>::value }; }; };
+ template <typename T> struct EAAlignOf2 { struct Big { T x; char c; };
+ enum { diff = sizeof (Big) - sizeof (T), value = helper<diff>::template Val<T>::value }; };
+ template <typename T> struct EAAlignof3 { enum { x = EAAlignOf2<T>::value, y = EAAlignOf1<T>::value, value = x < y ? x : y }; };
+ #define EA_ALIGN_OF(type) ((size_t)EAAlignof3<type>::value)
+
+ #else
+ // C implementation of EA_ALIGN_OF
+ // This implementation works for most cases, but doesn't directly work
+ // for types such as function pointer declarations. To work with those
+ // types you need to typedef the type and then use the typedef in EA_ALIGN_OF.
+ #define EA_ALIGN_OF(type) ((size_t)offsetof(struct { char c; type m; }, m))
+ #endif
+ #endif
+
+ // EA_PRAGMA_PACK_VC
+ //
+ // Wraps #pragma pack in a way that allows for cleaner code.
+ //
+ // Example usage:
+ // EA_PRAGMA_PACK_VC(push, 1)
+ // struct X{ char c; int i; };
+ // EA_PRAGMA_PACK_VC(pop)
+ //
+ #if !defined(EA_PRAGMA_PACK_VC)
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_PRAGMA_PACK_VC(...) __pragma(pack(__VA_ARGS__))
+ #elif !defined(EA_COMPILER_NO_VARIADIC_MACROS)
+ #define EA_PRAGMA_PACK_VC(...)
+ #else
+ // No support. However, all compilers of significance to us support variadic macros.
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_LIKELY / EA_UNLIKELY
+ //
+ // Defined as a macro which gives a hint to the compiler for branch
+ // prediction. GCC gives you the ability to manually give a hint to
+ // the compiler about the result of a comparison, though it's often
+ // best to compile shipping code with profiling feedback under both
+ // GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+ // are times when you feel very sure that a boolean expression will
+ // usually evaluate to either true or false and can help the compiler
+ // by using an explicit directive...
+ //
+ // Example usage:
+ // if(EA_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+ // { ... }
+ //
+ // Example usage:
+ // if(EA_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+ // { ... }
+ //
+ #ifndef EA_LIKELY
+ #if (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)
+ #if defined(__cplusplus)
+ #define EA_LIKELY(x) __builtin_expect(!!(x), true)
+ #define EA_UNLIKELY(x) __builtin_expect(!!(x), false)
+ #else
+ #define EA_LIKELY(x) __builtin_expect(!!(x), 1)
+ #define EA_UNLIKELY(x) __builtin_expect(!!(x), 0)
+ #endif
+ #else
+ #define EA_LIKELY(x) (x)
+ #define EA_UNLIKELY(x) (x)
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_HAS_INCLUDE_AVAILABLE
+ //
+ // Used to guard against the EA_HAS_INCLUDE() macro on compilers that do not
+ // support said feature.
+ //
+ // Example usage:
+ //
+ // #if EA_HAS_INCLUDE_AVAILABLE
+ // #if EA_HAS_INCLUDE("myinclude.h")
+ // #include "myinclude.h"
+ // #endif
+ // #endif
+ #if !defined(EA_HAS_INCLUDE_AVAILABLE)
+ #if EA_COMPILER_CPP17_ENABLED || EA_COMPILER_CLANG || EA_COMPILER_GNUC
+ #define EA_HAS_INCLUDE_AVAILABLE 1
+ #else
+ #define EA_HAS_INCLUDE_AVAILABLE 0
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_HAS_INCLUDE
+ //
+ // May be used in #if and #elif expressions to test for the existence
+ // of the header referenced in the operand. If possible it evaluates to a
+ // non-zero value and zero otherwise. The operand is the same form as the file
+ // in a #include directive.
+ //
+ // Example usage:
+ //
+ // #if EA_HAS_INCLUDE("myinclude.h")
+ // #include "myinclude.h"
+ // #endif
+ //
+ // #if EA_HAS_INCLUDE(<myinclude.h>)
+ // #include <myinclude.h>
+ // #endif
+
+ #if !defined(EA_HAS_INCLUDE)
+ #if EA_COMPILER_CPP17_ENABLED
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #elif EA_COMPILER_CLANG
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #elif EA_COMPILER_GNUC
+ #define EA_HAS_INCLUDE(x) __has_include(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_PRIORITY_AVAILABLE
+ //
+ // This value is either not defined, or defined to 1.
+ // Defines if the GCC attribute init_priority is supported by the compiler.
+ //
+ #if !defined(EA_INIT_PRIORITY_AVAILABLE)
+ #if defined(__GNUC__) && !defined(__EDG__) // EDG typically #defines __GNUC__ but doesn't implement init_priority.
+ #define EA_INIT_PRIORITY_AVAILABLE 1
+ #elif defined(__clang__)
+ #define EA_INIT_PRIORITY_AVAILABLE 1 // Clang implements init_priority
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_PRIORITY
+ //
+ // This is simply a wrapper for the GCC init_priority attribute that allows
+ // multiplatform code to be easier to read. This attribute doesn't apply
+ // to VC++ because VC++ uses file-level pragmas to control init ordering.
+ //
+ // Example usage:
+ // SomeClass gSomeClass EA_INIT_PRIORITY(2000);
+ //
+ #if !defined(EA_INIT_PRIORITY)
+ #if defined(EA_INIT_PRIORITY_AVAILABLE)
+ #define EA_INIT_PRIORITY(x) __attribute__ ((init_priority (x)))
+ #else
+ #define EA_INIT_PRIORITY(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_SEG_AVAILABLE
+ //
+ //
+ #if !defined(EA_INIT_SEG_AVAILABLE)
+ #if defined(_MSC_VER)
+ #define EA_INIT_SEG_AVAILABLE 1
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_SEG
+ //
+ // Specifies a keyword or code section that affects the order in which startup code is executed.
+ //
+ // https://docs.microsoft.com/en-us/cpp/preprocessor/init-seg?view=vs-2019
+ //
+ // Example:
+ // EA_INIT_SEG(compiler) MyType gMyTypeGlobal;
+ // EA_INIT_SEG("my_section") MyOtherType gMyOtherTypeGlobal;
+ //
+ #if !defined(EA_INIT_SEG)
+ #if defined(EA_INIT_SEG_AVAILABLE)
+ #define EA_INIT_SEG(x) \
+ __pragma(warning(push)) __pragma(warning(disable : 4074)) __pragma(warning(disable : 4075)) __pragma(init_seg(x)) \
+ __pragma(warning(pop))
+ #else
+ #define EA_INIT_SEG(x)
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_MAY_ALIAS_AVAILABLE
+ //
+ // Defined as 0, 1, or 2.
+ // Defines if the GCC attribute may_alias is supported by the compiler.
+ // Consists of a value 0 (unsupported, shouldn't be used), 1 (some support), + // or 2 (full proper support). + // + #ifndef EA_MAY_ALIAS_AVAILABLE + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) + #if !defined(__EDG__) // define it as 1 while defining GCC's support as 2. + #define EA_MAY_ALIAS_AVAILABLE 2 + #else + #define EA_MAY_ALIAS_AVAILABLE 0 + #endif + #else + #define EA_MAY_ALIAS_AVAILABLE 0 + #endif + #endif + + + // EA_MAY_ALIAS + // + // Defined as a macro that wraps the GCC may_alias attribute. This attribute + // has no significance for VC++ because VC++ doesn't support the concept of + // strict aliasing. Users should avoid writing code that breaks strict + // aliasing rules; EA_MAY_ALIAS is for cases with no alternative. + // + // Example usage: + // void* EA_MAY_ALIAS gPtr = NULL; + // + // Example usage: + // typedef void* EA_MAY_ALIAS pvoid_may_alias; + // pvoid_may_alias gPtr = NULL; + // + #if EA_MAY_ALIAS_AVAILABLE + #define EA_MAY_ALIAS __attribute__((__may_alias__)) + #else + #define EA_MAY_ALIAS + #endif + + + // ------------------------------------------------------------------------ + // EA_ASSUME + // + // This acts the same as the VC++ __assume directive and is implemented + // simply as a wrapper around it to allow portable usage of it and to take + // advantage of it if and when it appears in other compilers. + // + // Example usage: + // void Function(int a) { + // switch(a) { + // case 1: + // DoSomething(1); + // break; + // case 2: + // DoSomething(-1); + // break; + // default: + // EA_ASSUME(0); // This tells the optimizer that the default cannot be reached. + // } + // } + // + #ifndef EA_ASSUME + #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later + #define EA_ASSUME(x) __assume(x) + #else + #define EA_ASSUME(x) + #endif + #endif + + + + // ------------------------------------------------------------------------ + // EA_ANALYSIS_ASSUME + // + // This acts the same as the VC++ __analysis_assume directive and is implemented + // simply as a wrapper around it to allow portable usage of it and to take + // advantage of it if and when it appears in other compilers. + // + // Example usage: + // char Function(char* p) { + // EA_ANALYSIS_ASSUME(p != NULL); + // return *p; + // } + // + #ifndef EA_ANALYSIS_ASSUME + #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later + #define EA_ANALYSIS_ASSUME(x) __analysis_assume(!!(x)) // !! because that allows for convertible-to-bool in addition to bool. + #else + #define EA_ANALYSIS_ASSUME(x) + #endif + #endif + + + + // ------------------------------------------------------------------------ + // EA_DISABLE_VC_WARNING / EA_RESTORE_VC_WARNING + // + // Disable and re-enable warning(s) within code. + // This is simply a wrapper for VC++ #pragma warning(disable: nnnn) for the + // purpose of making code easier to read due to avoiding nested compiler ifdefs + // directly in code. 
+ // + // Example usage: + // EA_DISABLE_VC_WARNING(4127 3244) + // + // EA_RESTORE_VC_WARNING() + // + #ifndef EA_DISABLE_VC_WARNING + #if defined(_MSC_VER) + #define EA_DISABLE_VC_WARNING(w) \ + __pragma(warning(push)) \ + __pragma(warning(disable:w)) + #else + #define EA_DISABLE_VC_WARNING(w) + #endif + #endif + + #ifndef EA_RESTORE_VC_WARNING + #if defined(_MSC_VER) + #define EA_RESTORE_VC_WARNING() \ + __pragma(warning(pop)) + #else + #define EA_RESTORE_VC_WARNING() + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_ENABLE_VC_WARNING_AS_ERROR / EA_DISABLE_VC_WARNING_AS_ERROR + // + // Disable and re-enable treating a warning as error within code. + // This is simply a wrapper for VC++ #pragma warning(error: nnnn) for the + // purpose of making code easier to read due to avoiding nested compiler ifdefs + // directly in code. + // + // Example usage: + // EA_ENABLE_VC_WARNING_AS_ERROR(4996) + // + // EA_DISABLE_VC_WARNING_AS_ERROR() + // + #ifndef EA_ENABLE_VC_WARNING_AS_ERROR + #if defined(_MSC_VER) + #define EA_ENABLE_VC_WARNING_AS_ERROR(w) \ + __pragma(warning(push)) \ + __pragma(warning(error:w)) + #else + #define EA_ENABLE_VC_WARNING_AS_ERROR(w) + #endif + #endif + + #ifndef EA_DISABLE_VC_WARNING_AS_ERROR + #if defined(_MSC_VER) + #define EA_DISABLE_VC_WARNING_AS_ERROR() \ + __pragma(warning(pop)) + #else + #define EA_DISABLE_VC_WARNING_AS_ERROR() + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_DISABLE_GCC_WARNING / EA_RESTORE_GCC_WARNING + // + // Example usage: + // // Only one warning can be ignored per statement, due to how GCC works. + // EA_DISABLE_GCC_WARNING(-Wuninitialized) + // EA_DISABLE_GCC_WARNING(-Wunused) + // + // EA_RESTORE_GCC_WARNING() + // EA_RESTORE_GCC_WARNING() + // + #ifndef EA_DISABLE_GCC_WARNING + #if defined(EA_COMPILER_GNUC) + #define EAGCCWHELP0(x) #x + #define EAGCCWHELP1(x) EAGCCWHELP0(GCC diagnostic ignored x) + #define EAGCCWHELP2(x) EAGCCWHELP1(#x) + #endif + + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) // Can't test directly for __GNUC__ because some compilers lie. + #define EA_DISABLE_GCC_WARNING(w) \ + _Pragma("GCC diagnostic push") \ + _Pragma(EAGCCWHELP2(w)) + #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) + #define EA_DISABLE_GCC_WARNING(w) \ + _Pragma(EAGCCWHELP2(w)) + #else + #define EA_DISABLE_GCC_WARNING(w) + #endif + #endif + + #ifndef EA_RESTORE_GCC_WARNING + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) + #define EA_RESTORE_GCC_WARNING() \ + _Pragma("GCC diagnostic pop") + #else + #define EA_RESTORE_GCC_WARNING() + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_DISABLE_ALL_GCC_WARNINGS / EA_RESTORE_ALL_GCC_WARNINGS + // + // This isn't possible except via using _Pragma("GCC system_header"), though + // that has some limitations in how it works. Another means is to manually + // disable individual warnings within a GCC diagnostic push statement. + // GCC doesn't have as many warnings as VC++ and EDG and so this may be feasible. + // ------------------------------------------------------------------------ + + + // ------------------------------------------------------------------------ + // EA_ENABLE_GCC_WARNING_AS_ERROR / EA_DISABLE_GCC_WARNING_AS_ERROR + // + // Example usage: + // // Only one warning can be treated as an error per statement, due to how GCC works. 
+ // EA_ENABLE_GCC_WARNING_AS_ERROR(-Wuninitialized)
+ // EA_ENABLE_GCC_WARNING_AS_ERROR(-Wunused)
+ //
+ // EA_DISABLE_GCC_WARNING_AS_ERROR()
+ // EA_DISABLE_GCC_WARNING_AS_ERROR()
+ //
+ #ifndef EA_ENABLE_GCC_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_GNUC)
+ #define EAGCCWERRORHELP0(x) #x
+ #define EAGCCWERRORHELP1(x) EAGCCWERRORHELP0(GCC diagnostic error x)
+ #define EAGCCWERRORHELP2(x) EAGCCWERRORHELP1(#x)
+ #endif
+
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) // Can't test directly for __GNUC__ because some compilers lie.
+ #define EA_ENABLE_GCC_WARNING_AS_ERROR(w) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma(EAGCCWERRORHELP2(w))
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004)
+ #define EA_ENABLE_GCC_WARNING_AS_ERROR(w) \
+ _Pragma(EAGCCWERRORHELP2(w))
+ #else
+ #define EA_ENABLE_GCC_WARNING_AS_ERROR(w)
+ #endif
+ #endif
+
+ #ifndef EA_DISABLE_GCC_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)
+ #define EA_DISABLE_GCC_WARNING_AS_ERROR() \
+ _Pragma("GCC diagnostic pop")
+ #else
+ #define EA_DISABLE_GCC_WARNING_AS_ERROR()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_CLANG_WARNING / EA_RESTORE_CLANG_WARNING
+ //
+ // Example usage:
+ // // Only one warning can be ignored per statement, due to how clang works.
+ // EA_DISABLE_CLANG_WARNING(-Wuninitialized)
+ // EA_DISABLE_CLANG_WARNING(-Wunused)
+ //
+ // EA_RESTORE_CLANG_WARNING()
+ // EA_RESTORE_CLANG_WARNING()
+ //
+ #ifndef EA_DISABLE_CLANG_WARNING
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EACLANGWHELP0(x) #x
+ #define EACLANGWHELP1(x) EACLANGWHELP0(clang diagnostic ignored x)
+ #define EACLANGWHELP2(x) EACLANGWHELP1(#x)
+
+ #define EA_DISABLE_CLANG_WARNING(w) \
+ _Pragma("clang diagnostic push") \
+ _Pragma(EACLANGWHELP2(-Wunknown-warning-option))\
+ _Pragma(EACLANGWHELP2(w))
+ #else
+ #define EA_DISABLE_CLANG_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_CLANG_WARNING
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EA_RESTORE_CLANG_WARNING() \
+ _Pragma("clang diagnostic pop")
+ #else
+ #define EA_RESTORE_CLANG_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_CLANG_WARNINGS / EA_RESTORE_ALL_CLANG_WARNINGS
+ //
+ // The situation for clang is the same as for GCC. See above.
+ // ------------------------------------------------------------------------
+
+
+ // ------------------------------------------------------------------------
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR / EA_DISABLE_CLANG_WARNING_AS_ERROR
+ //
+ // Example usage:
+ // // Only one warning can be treated as an error per statement, due to how clang works.
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR(-Wuninitialized)
+ // EA_ENABLE_CLANG_WARNING_AS_ERROR(-Wunused)
+ //
+ // EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ // EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ //
+ #ifndef EA_ENABLE_CLANG_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EACLANGWERRORHELP0(x) #x
+ #define EACLANGWERRORHELP1(x) EACLANGWERRORHELP0(clang diagnostic error x)
+ #define EACLANGWERRORHELP2(x) EACLANGWERRORHELP1(#x)
+
+ #define EA_ENABLE_CLANG_WARNING_AS_ERROR(w) \
+ _Pragma("clang diagnostic push") \
+ _Pragma(EACLANGWERRORHELP2(w))
+ #else
+ #define EA_ENABLE_CLANG_WARNING_AS_ERROR(w)
+ #endif
+ #endif
+
+ #ifndef EA_DISABLE_CLANG_WARNING_AS_ERROR
+ #if defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_CLANG_CL)
+ #define EA_DISABLE_CLANG_WARNING_AS_ERROR() \
+ _Pragma("clang diagnostic pop")
+ #else
+ #define EA_DISABLE_CLANG_WARNING_AS_ERROR()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_SN_WARNING / EA_RESTORE_SN_WARNING
+ //
+ // Note that we define this macro specifically for the SN compiler instead of
+ // having a generic one for EDG-based compilers. The reason for this is that
+ // while SN is indeed based on EDG, SN has different warning value mappings
+ // and thus warning 1234 for SN is not the same as 1234 for all other EDG compilers.
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line.
+ // EA_DISABLE_SN_WARNING(1787)
+ // EA_DISABLE_SN_WARNING(552)
+ //
+ // EA_RESTORE_SN_WARNING()
+ // EA_RESTORE_SN_WARNING()
+ //
+ #ifndef EA_DISABLE_SN_WARNING
+ #define EA_DISABLE_SN_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_SN_WARNING
+ #define EA_RESTORE_SN_WARNING()
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_SN_WARNINGS / EA_RESTORE_ALL_SN_WARNINGS
+ //
+ // Example usage:
+ // EA_DISABLE_ALL_SN_WARNINGS()
+ //
+ // EA_RESTORE_ALL_SN_WARNINGS()
+ //
+ #ifndef EA_DISABLE_ALL_SN_WARNINGS
+ #define EA_DISABLE_ALL_SN_WARNINGS()
+ #endif
+
+ #ifndef EA_RESTORE_ALL_SN_WARNINGS
+ #define EA_RESTORE_ALL_SN_WARNINGS()
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_GHS_WARNING / EA_RESTORE_GHS_WARNING
+ //
+ // Disable warnings from the Green Hills compiler.
+ //
+ // Example usage:
+ // EA_DISABLE_GHS_WARNING(193)
+ // EA_DISABLE_GHS_WARNING(236, 5323)
+ //
+ // EA_RESTORE_GHS_WARNING()
+ // EA_RESTORE_GHS_WARNING()
+ //
+ #ifndef EA_DISABLE_GHS_WARNING
+ #define EA_DISABLE_GHS_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_GHS_WARNING
+ #define EA_RESTORE_GHS_WARNING()
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_GHS_WARNINGS / EA_RESTORE_ALL_GHS_WARNINGS
+ //
+ // #ifndef EA_DISABLE_ALL_GHS_WARNINGS
+ // #if defined(EA_COMPILER_GREEN_HILLS)
+ // #define EA_DISABLE_ALL_GHS_WARNINGS(w) \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_DISABLE_ALL_GHS_WARNINGS(w)
+ // #endif
+ // #endif
+ //
+ // #ifndef EA_RESTORE_ALL_GHS_WARNINGS
+ // #if defined(EA_COMPILER_GREEN_HILLS)
+ // #define EA_RESTORE_ALL_GHS_WARNINGS() \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_RESTORE_ALL_GHS_WARNINGS()
+ // #endif
+ // #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_EDG_WARNING / EA_RESTORE_EDG_WARNING
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line.
+ // EA_DISABLE_EDG_WARNING(193)
+ // EA_DISABLE_EDG_WARNING(236)
+ //
+ // EA_RESTORE_EDG_WARNING()
+ // EA_RESTORE_EDG_WARNING()
+ //
+ #ifndef EA_DISABLE_EDG_WARNING
+ // EDG-based compilers are inconsistent in how they implement warning pragmas.
+ #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_INTEL) && !defined(EA_COMPILER_RVCT)
+ #define EAEDGWHELP0(x) #x
+ #define EAEDGWHELP1(x) EAEDGWHELP0(diag_suppress x)
+
+ #define EA_DISABLE_EDG_WARNING(w) \
+ _Pragma("control %push diag") \
+ _Pragma(EAEDGWHELP1(w))
+ #else
+ #define EA_DISABLE_EDG_WARNING(w)
+ #endif
+ #endif
+
+ #ifndef EA_RESTORE_EDG_WARNING
+ #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_INTEL) && !defined(EA_COMPILER_RVCT)
+ #define EA_RESTORE_EDG_WARNING() \
+ _Pragma("control %pop diag")
+ #else
+ #define EA_RESTORE_EDG_WARNING()
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_EDG_WARNINGS / EA_RESTORE_ALL_EDG_WARNINGS
+ //
+ //#ifndef EA_DISABLE_ALL_EDG_WARNINGS
+ // #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_SN)
+ // #define EA_DISABLE_ALL_EDG_WARNINGS(w) \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_DISABLE_ALL_EDG_WARNINGS(w)
+ // #endif
+ //#endif
+ //
+ //#ifndef EA_RESTORE_ALL_EDG_WARNINGS
+ // #if defined(EA_COMPILER_EDG) && !defined(EA_COMPILER_SN)
+ // #define EA_RESTORE_ALL_EDG_WARNINGS() \_
+ // _Pragma("_________")
+ // #else
+ // #define EA_RESTORE_ALL_EDG_WARNINGS()
+ // #endif
+ //#endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_CW_WARNING / EA_RESTORE_CW_WARNING
+ //
+ // Note that this macro can only control warnings via numbers and not by
+ // names. The reason for this is that the compiler's syntax for such
+ // warnings is not the same as for numbers.
+ //
+ // Example usage:
+ // // Currently we are limited to one warning per line and must also specify the warning in the restore macro.
+ // EA_DISABLE_CW_WARNING(10317)
+ // EA_DISABLE_CW_WARNING(10324)
+ //
+ // EA_RESTORE_CW_WARNING(10317)
+ // EA_RESTORE_CW_WARNING(10324)
+ //
+ #ifndef EA_DISABLE_CW_WARNING
+ #define EA_DISABLE_CW_WARNING(w)
+ #endif
+
+ #ifndef EA_RESTORE_CW_WARNING
+ #define EA_RESTORE_CW_WARNING(w)
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DISABLE_ALL_CW_WARNINGS / EA_RESTORE_ALL_CW_WARNINGS
+ //
+ #ifndef EA_DISABLE_ALL_CW_WARNINGS
+ #define EA_DISABLE_ALL_CW_WARNINGS()
+ #endif
+
+ #ifndef EA_RESTORE_ALL_CW_WARNINGS
+ #define EA_RESTORE_ALL_CW_WARNINGS()
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_PURE
+ //
+ // This acts the same as the GCC __attribute__ ((pure)) directive and is
+ // implemented simply as a wrapper around it to allow portable usage of
+ // it and to take advantage of it if and when it appears in other compilers.
+ //
+ // A "pure" function is one that has no effects except its return value and
+ // its return value is a function of only the function's parameters or
+ // non-volatile global variables. Any parameter or global variable access
+ // must be read-only. Loop optimization and subexpression elimination can be
+ // applied to such functions. A common example is strlen(): Given identical
+ // inputs, the function's return value (its only effect) is invariant across
+ // multiple invocations and thus can be pulled out of a loop and called but once.
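+ //
+ // For instance (an illustrative sketch; 's' is a hypothetical string variable),
+ // a pure strlen() could be hoisted by the optimizer:
+ // for(size_t i = 0, n = strlen(s); i < n; ++i) // strlen(s) is evaluated once, not per iteration.
+ // { ... }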
+ //
+ // Example usage:
+ // EA_PURE void Function();
+ //
+ #ifndef EA_PURE
+ #if defined(EA_COMPILER_GNUC)
+ #define EA_PURE __attribute__((pure))
+ #elif defined(EA_COMPILER_ARM) // Arm brand compiler for ARM CPU
+ #define EA_PURE __pure
+ #else
+ #define EA_PURE
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_WEAK
+ // EA_WEAK_SUPPORTED -- defined as 0 or 1.
+ //
+ // GCC
+ // The weak attribute causes the declaration to be emitted as a weak
+ // symbol rather than a global. This is primarily useful in defining
+ // library functions which can be overridden in user code, though it
+ // can also be used with non-function declarations.
+ //
+ // VC++
+ // At link time, if multiple definitions of a COMDAT are seen, the linker
+ // picks one and discards the rest. If the linker option /OPT:REF
+ // is selected, then COMDAT elimination will occur to remove all the
+ // unreferenced data items in the linker output.
+ //
+ // Example usage:
+ // EA_WEAK void Function();
+ //
+ #ifndef EA_WEAK
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later
+ #define EA_WEAK __declspec(selectany)
+ #define EA_WEAK_SUPPORTED 1
+ #elif defined(_MSC_VER) || (defined(__GNUC__) && defined(__CYGWIN__))
+ #define EA_WEAK
+ #define EA_WEAK_SUPPORTED 0
+ #elif defined(EA_COMPILER_ARM) // Arm brand compiler for ARM CPU
+ #define EA_WEAK __weak
+ #define EA_WEAK_SUPPORTED 1
+ #else // GCC and IBM compilers, others.
+ #define EA_WEAK __attribute__((weak))
+ #define EA_WEAK_SUPPORTED 1
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_UNUSED
+ //
+ // Makes compiler warnings about unused variables go away.
+ //
+ // Example usage:
+ // void Function(int x)
+ // {
+ // int y;
+ // EA_UNUSED(x);
+ // EA_UNUSED(y);
+ // }
+ //
+ #ifndef EA_UNUSED
+ // The EDG solution below is pretty weak and needs to be augmented or replaced.
+ // It can't handle the C language, is limited to places where template declarations
+ // can be used, and requires the type x to be usable as a function's reference argument.
+ #if defined(__cplusplus) && defined(__EDG__)
+ template <typename T>
+ inline void EABaseUnused(T const volatile & x) { (void)x; }
+ #define EA_UNUSED(x) EABaseUnused(x)
+ #else
+ #define EA_UNUSED(x) (void)x
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_EMPTY
+ //
+ // Allows for a null statement, usually for the purpose of avoiding compiler warnings.
+ //
+ // Example usage:
+ // #ifdef EA_DEBUG
+ // #define MyDebugPrintf(x, y) printf(x, y)
+ // #else
+ // #define MyDebugPrintf(x, y) EA_EMPTY
+ // #endif
+ //
+ #ifndef EA_EMPTY
+ #define EA_EMPTY (void)0
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CURRENT_FUNCTION
+ //
+ // Provides a consistent way to get the current function name as a macro
+ // like the __FILE__ and __LINE__ macros work. The C99 standard specifies
+ // that __func__ be provided by the compiler, but most compilers don't yet
+ // follow that convention. However, many compilers have an alternative.
+ //
+ // We also define EA_CURRENT_FUNCTION_SUPPORTED for when it is not possible
+ // to have EA_CURRENT_FUNCTION work as expected.
+ //
+ // Defined inside a function because otherwise the macro might not be
+ // defined and code below might not compile. This happens with some
+ // compilers.
+ //
+ #ifndef EA_CURRENT_FUNCTION
+ #if defined __GNUC__ || (defined __ICC && __ICC >= 600)
+ #define EA_CURRENT_FUNCTION __PRETTY_FUNCTION__
+ #elif defined(__FUNCSIG__)
+ #define EA_CURRENT_FUNCTION __FUNCSIG__
+ #elif (defined __INTEL_COMPILER && __INTEL_COMPILER >= 600) || (defined __IBMCPP__ && __IBMCPP__ >= 500) || (defined CS_UNDEFINED_STRING && CS_UNDEFINED_STRING >= 0x4200)
+ #define EA_CURRENT_FUNCTION __FUNCTION__
+ #elif defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901
+ #define EA_CURRENT_FUNCTION __func__
+ #else
+ #define EA_CURRENT_FUNCTION "(unknown function)"
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // wchar_t
+ // Here we define:
+ // EA_WCHAR_T_NON_NATIVE
+ // EA_WCHAR_SIZE = <sizeof(wchar_t)>
+ //
+ #ifndef EA_WCHAR_T_NON_NATIVE
+ // Compilers that always implement wchar_t as native include:
+ // COMEAU, new SN, and other EDG-based compilers.
+ // GCC
+ // Borland
+ // SunPro
+ // IBM Visual Age
+ #if defined(EA_COMPILER_INTEL)
+ #if (EA_COMPILER_VERSION < 700)
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #else
+ #if (!defined(_WCHAR_T_DEFINED) && !defined(_WCHAR_T))
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #endif
+ #elif defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_WINDOWS))
+ #ifndef _NATIVE_WCHAR_T_DEFINED
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #elif defined(__EDG_VERSION__) && (!defined(_WCHAR_T) && (__EDG_VERSION__ < 400)) // EDG prior to v4 uses _WCHAR_T to indicate if wchar_t is native. v4+ may define something else, but we're not currently aware of it.
+ #define EA_WCHAR_T_NON_NATIVE 1
+ #endif
+ #endif
+
+ #ifndef EA_WCHAR_SIZE // If the user hasn't specified that it is a given size...
+ #if defined(__WCHAR_MAX__) // GCC defines this for most platforms.
+ #if (__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (__WCHAR_MAX__ == 32767) || (__WCHAR_MAX__ == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (__WCHAR_MAX__ == 127) || (__WCHAR_MAX__ == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(WCHAR_MAX) // The SN and Arm compilers define this.
+ #if (WCHAR_MAX == 2147483647) || (WCHAR_MAX == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (WCHAR_MAX == 32767) || (WCHAR_MAX == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (WCHAR_MAX == 127) || (WCHAR_MAX == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(__WCHAR_BIT) // Green Hills (and other versions of EDG?) uses this.
+ #if (__WCHAR_BIT == 16)
+ #define EA_WCHAR_SIZE 2
+ #elif (__WCHAR_BIT == 32)
+ #define EA_WCHAR_SIZE 4
+ #elif (__WCHAR_BIT == 8)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(_WCMAX) // The SN and Arm compilers define this.
+ #if (_WCMAX == 2147483647) || (_WCMAX == 4294967295)
+ #define EA_WCHAR_SIZE 4
+ #elif (_WCMAX == 32767) || (_WCMAX == 65535)
+ #define EA_WCHAR_SIZE 2
+ #elif (_WCMAX == 127) || (_WCMAX == 255)
+ #define EA_WCHAR_SIZE 1
+ #else
+ #define EA_WCHAR_SIZE 4
+ #endif
+ #elif defined(EA_PLATFORM_UNIX)
+ // It is standard on Unix to have wchar_t be int32_t or uint32_t.
+ // All versions of GNUC default to a 32 bit wchar_t, but EA has used
+ // the -fshort-wchar GCC command line option to force it to 16 bit.
+ // If you know that the compiler is set to use a wchar_t of other than
+ // the default, you need to manually define EA_WCHAR_SIZE for the build.
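+ // (For example, a build using GCC's -fshort-wchar would also pass
+ // -DEA_WCHAR_SIZE=2 so that this header agrees with the compiler.)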
+ #define EA_WCHAR_SIZE 4
+ #else
+ // It is standard on Windows to have wchar_t be uint16_t. GCC
+ // defines wchar_t as int by default. Electronic Arts has
+ // standardized on wchar_t being an unsigned 16 bit value on all
+ // console platforms. Given that there is currently no known way to
+ // tell at preprocessor time what the size of wchar_t is, we declare
+ // it to be 2, as this is the Electronic Arts standard. If you have
+ // EA_WCHAR_SIZE != sizeof(wchar_t), then your code might not be
+ // broken, but it also won't work with wchar libraries and data from
+ // other parts of EA. Under GCC, you can force wchar_t to two bytes
+ // with the -fshort-wchar compiler argument.
+ #define EA_WCHAR_SIZE 2
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_RESTRICT
+ //
+ // The C99 standard defines a new keyword, restrict, which allows for the
+ // improvement of code generation regarding memory usage. Compilers can
+ // generate significantly faster code when you are able to use restrict.
+ //
+ // Example usage:
+ // void DoSomething(char* EA_RESTRICT p1, char* EA_RESTRICT p2);
+ //
+ #ifndef EA_RESTRICT
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_CLANG)
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_GNUC) // Includes GCC and other compilers emulating GCC.
+ #define EA_RESTRICT __restrict // GCC defines 'restrict' (as opposed to __restrict) in C99 mode only.
+ #elif defined(EA_COMPILER_ARM)
+ #define EA_RESTRICT __restrict
+ #elif defined(EA_COMPILER_IS_C99)
+ #define EA_RESTRICT restrict
+ #else
+ // If the compiler didn't support restricted pointers, defining EA_RESTRICT
+ // away would result in compiling and running fine but you just wouldn't get
+ // the same level of optimization. On the other hand, all the major compilers
+ // support restricted pointers.
+ #define EA_RESTRICT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DEPRECATED // Used as a prefix.
+ // EA_PREFIX_DEPRECATED // You should need this only for unusual compilers.
+ // EA_POSTFIX_DEPRECATED // You should need this only for unusual compilers.
+ // EA_DEPRECATED_MESSAGE // Used as a prefix and provides a deprecation message.
+ //
+ // Example usage:
+ // EA_DEPRECATED void Function();
+ // EA_DEPRECATED_MESSAGE("Use 1.0v API instead") void Function();
+ //
+ // or for maximum portability:
+ // EA_PREFIX_DEPRECATED void Function() EA_POSTFIX_DEPRECATED;
+ //
+
+ #ifndef EA_DEPRECATED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_DEPRECATED [[deprecated]]
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+ #define EA_DEPRECATED __declspec(deprecated)
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_DEPRECATED
+ #else
+ #define EA_DEPRECATED __attribute__((deprecated))
+ #endif
+ #endif
+
+ #ifndef EA_PREFIX_DEPRECATED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_PREFIX_DEPRECATED [[deprecated]]
+ #define EA_POSTFIX_DEPRECATED
+ #elif defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+ #define EA_PREFIX_DEPRECATED __declspec(deprecated)
+ #define EA_POSTFIX_DEPRECATED
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_PREFIX_DEPRECATED
+ #define EA_POSTFIX_DEPRECATED
+ #else
+ #define EA_PREFIX_DEPRECATED
+ #define EA_POSTFIX_DEPRECATED __attribute__((deprecated))
+ #endif
+ #endif
+
+ #ifndef EA_DEPRECATED_MESSAGE
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_DEPRECATED_MESSAGE(msg) [[deprecated(#msg)]]
+ #else
+ // Compiler does not support deprecation messages; explicitly drop the msg but still mark the function as deprecated.
+ #define EA_DEPRECATED_MESSAGE(msg) EA_DEPRECATED
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FORCE_INLINE // Used as a prefix.
+ // EA_PREFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_FORCE_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_FORCE_INLINE void Foo() EA_POSTFIX_FORCE_INLINE; // Implementation elsewhere.
+ //
+ // Note that when the prefix version of this macro is used, it replaces
+ // the regular C++ 'inline' statement. Thus you should not use both the
+ // C++ inline statement and this macro with the same function declaration.
+ //
+ // To force inline usage under GCC 3.1+, you use this:
+ // inline void Foo() __attribute__((always_inline));
+ // or
+ // inline __attribute__((always_inline)) void Foo();
+ //
+ // The CodeWarrior compiler doesn't have the concept of forcing inlining per function.
+ //
+ #ifndef EA_FORCE_INLINE
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_FORCE_INLINE __forceinline
+ #elif defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301) || defined(EA_COMPILER_CLANG)
+ #if defined(__cplusplus)
+ #define EA_FORCE_INLINE inline __attribute__((always_inline))
+ #else
+ #define EA_FORCE_INLINE __inline__ __attribute__((always_inline))
+ #endif
+ #else
+ #if defined(__cplusplus)
+ #define EA_FORCE_INLINE inline
+ #else
+ #define EA_FORCE_INLINE __inline
+ #endif
+ #endif
+ #endif
+
+ #if defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301) || defined(EA_COMPILER_CLANG)
+ #define EA_PREFIX_FORCE_INLINE inline
+ #define EA_POSTFIX_FORCE_INLINE __attribute__((always_inline))
+ #else
+ #define EA_PREFIX_FORCE_INLINE inline
+ #define EA_POSTFIX_FORCE_INLINE
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FORCE_INLINE_LAMBDA
+ //
+ // EA_FORCE_INLINE_LAMBDA is used to force inline a call to a lambda when possible.
+ // Force inlining a lambda can be useful to reduce overhead in situations where a lambda
+ // may only be called once, or inlining allows the compiler to apply other optimizations that wouldn't
+ // otherwise be possible.
+ //
+ // The ability to force inline a lambda is currently only available on a subset of compilers.
+ //
+ // Example usage:
+ //
+ // auto lambdaFunction = []() EA_FORCE_INLINE_LAMBDA
+ // {
+ // };
+ //
+ #ifndef EA_FORCE_INLINE_LAMBDA
+ #if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+ #define EA_FORCE_INLINE_LAMBDA __attribute__((always_inline))
+ #else
+ #define EA_FORCE_INLINE_LAMBDA
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_INLINE // Used as a prefix.
+ // EA_PREFIX_NO_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_NO_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_NO_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_NO_INLINE void Foo() EA_POSTFIX_NO_INLINE; // Implementation elsewhere.
+ //
+ // Note that this declaration is incompatible with C++ 'inline' and any
+ // variant of EA_FORCE_INLINE.
+ //
+ // To disable inline usage under VC++ prior to VS2005, you need to use this:
+ // #pragma inline_depth(0) // Disable inlining.
+ // void Foo() { ... }
+ // #pragma inline_depth() // Restore to default.
+ //
+ // Since there is no easy way to disable inlining on a function-by-function
+ // basis in VC++ prior to VS2005, the best strategy is to write platform-specific
+ // #ifdefs in the code or to disable inlining for a given module and enable
+ // functions individually with EA_FORCE_INLINE.
+ //
+ #ifndef EA_NO_INLINE
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_NO_INLINE __declspec(noinline)
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_NO_INLINE
+ #else
+ #define EA_NO_INLINE __attribute__((noinline))
+ #endif
+ #endif
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+ #define EA_PREFIX_NO_INLINE __declspec(noinline)
+ #define EA_POSTFIX_NO_INLINE
+ #elif defined(EA_COMPILER_MSVC)
+ #define EA_PREFIX_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE
+ #else
+ #define EA_PREFIX_NO_INLINE
+ #define EA_POSTFIX_NO_INLINE __attribute__((noinline))
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_VTABLE
+ //
+ // Example usage:
+ // class EA_NO_VTABLE X {
+ // virtual void InterfaceFunction();
+ // };
+ //
+ // EA_CLASS_NO_VTABLE(X) {
+ // virtual void InterfaceFunction();
+ // };
+ //
+ #ifdef EA_COMPILER_MSVC
+ #define EA_NO_VTABLE __declspec(novtable)
+ #define EA_CLASS_NO_VTABLE(x) class __declspec(novtable) x
+ #define EA_STRUCT_NO_VTABLE(x) struct __declspec(novtable) x
+ #else
+ #define EA_NO_VTABLE
+ #define EA_CLASS_NO_VTABLE(x) class x
+ #define EA_STRUCT_NO_VTABLE(x) struct x
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PASCAL
+ //
+ // Also known on PC platforms as stdcall.
+ // This convention causes the compiler to assume that the called function
+ // will pop off the stack space used to pass arguments, unless it takes a
+ // variable number of arguments.
+ //
+ // Example usage:
+ // this:
+ // void DoNothing(int x);
+ // void DoNothing(int x){}
+ // would be written as this:
+ // void EA_PASCAL_FUNC(DoNothing(int x));
+ // void EA_PASCAL_FUNC(DoNothing(int x)){}
+ //
+ #ifndef EA_PASCAL
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_PASCAL __stdcall
+ #elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86)
+ #define EA_PASCAL __attribute__((stdcall))
+ #else
+ // Some compilers simply don't support pascal calling convention.
+ // As a result, there isn't an issue here, since the specification of
+ // pascal calling convention is for the purpose of disambiguating the
+ // calling convention that is applied.
+ #define EA_PASCAL + #endif + #endif + + #ifndef EA_PASCAL_FUNC + #if defined(EA_COMPILER_MSVC) + #define EA_PASCAL_FUNC(funcname_and_paramlist) __stdcall funcname_and_paramlist + #elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86) + #define EA_PASCAL_FUNC(funcname_and_paramlist) __attribute__((stdcall)) funcname_and_paramlist + #else + #define EA_PASCAL_FUNC(funcname_and_paramlist) funcname_and_paramlist + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_SSE + // Visual C Processor Packs define _MSC_FULL_VER and are needed for SSE + // Intel C also has SSE support. + // EA_SSE is used to select FPU or SSE versions in hw_select.inl + // + // EA_SSE defines the level of SSE support: + // 0 indicates no SSE support + // 1 indicates SSE1 is supported + // 2 indicates SSE2 is supported + // 3 indicates SSE3 (or greater) is supported + // + // Note: SSE support beyond SSE3 can't be properly represented as a single + // version number. Instead users should use specific SSE defines (e.g. + // EA_SSE4_2) to detect what specific support is available. EA_SSE being + // equal to 3 really only indicates that SSE3 or greater is supported. + #ifndef EA_SSE + #if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG) + #if defined(__SSE3__) + #define EA_SSE 3 + #elif defined(__SSE2__) + #define EA_SSE 2 + #elif defined(__SSE__) && __SSE__ + #define EA_SSE 1 + #else + #define EA_SSE 0 + #endif + #elif (defined(EA_SSE3) && EA_SSE3) || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING + #define EA_SSE 3 + #elif defined(EA_SSE2) && EA_SSE2 + #define EA_SSE 2 + #elif defined(EA_PROCESSOR_X86) && defined(_MSC_FULL_VER) && !defined(__NOSSE__) && defined(_M_IX86_FP) + #define EA_SSE _M_IX86_FP + #elif defined(EA_PROCESSOR_X86) && defined(EA_COMPILER_INTEL) && !defined(__NOSSE__) + #define EA_SSE 1 + #elif defined(EA_PROCESSOR_X86_64) + // All x64 processors support SSE2 or higher + #define EA_SSE 2 + #else + #define EA_SSE 0 + #endif + #endif + + // ------------------------------------------------------------------------ + // We define separate defines for SSE support beyond SSE1. These defines + // are particularly useful for detecting SSE4.x features since there isn't + // a single concept of SSE4. + // + // The following SSE defines are always defined. 0 indicates the + // feature/level of SSE is not supported, and 1 indicates support is + // available. 
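+ //
+ // Example usage (an illustrative sketch; MyFill is a hypothetical function):
+ // void MyFill(float* p, size_t n)
+ // {
+ // #if EA_SSE2
+ // // ... use SSE2 _mm_* intrinsics ...
+ // #else
+ // // ... portable scalar fallback ...
+ // #endif
+ // }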
+ #ifndef EA_SSE2
+ #if EA_SSE >= 2
+ #define EA_SSE2 1
+ #else
+ #define EA_SSE2 0
+ #endif
+ #endif
+ #ifndef EA_SSE3
+ #if EA_SSE >= 3
+ #define EA_SSE3 1
+ #else
+ #define EA_SSE3 0
+ #endif
+ #endif
+ #ifndef EA_SSSE3
+ #if defined __SSSE3__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSSE3 1
+ #else
+ #define EA_SSSE3 0
+ #endif
+ #endif
+ #ifndef EA_SSE4_1
+ #if defined __SSE4_1__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4_1 1
+ #else
+ #define EA_SSE4_1 0
+ #endif
+ #endif
+ #ifndef EA_SSE4_2
+ #if defined __SSE4_2__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4_2 1
+ #else
+ #define EA_SSE4_2 0
+ #endif
+ #endif
+ #ifndef EA_SSE4A
+ #if defined __SSE4A__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_SSE4A 1
+ #else
+ #define EA_SSE4A 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_AVX
+ // EA_AVX may be used to determine if Advanced Vector Extensions are available for the target architecture
+ //
+ // EA_AVX defines the level of AVX support:
+ // 0 indicates no AVX support
+ // 1 indicates AVX1 is supported
+ // 2 indicates AVX2 is supported
+ #ifndef EA_AVX
+ #if defined __AVX2__
+ #define EA_AVX 2
+ #elif defined __AVX__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_AVX 1
+ #else
+ #define EA_AVX 0
+ #endif
+ #endif
+ #ifndef EA_AVX2
+ #if EA_AVX >= 2
+ #define EA_AVX2 1
+ #else
+ #define EA_AVX2 0
+ #endif
+ #endif
+
+ // EA_FP16C may be used to determine the existence of float <-> half conversion operations on an x86 CPU.
+ // (For example to determine if _mm_cvtph_ps or _mm_cvtps_ph could be used.)
+ #ifndef EA_FP16C
+ #if defined __F16C__ || defined EA_PLATFORM_XBOXONE || defined CS_UNDEFINED_STRING
+ #define EA_FP16C 1
+ #else
+ #define EA_FP16C 0
+ #endif
+ #endif
+
+ // EA_FP128 may be used to determine if __float128 is a supported type for use. This type is enabled by a GCC extension (_GLIBCXX_USE_FLOAT128)
+ // but is also supported by some implementations of clang (__FLOAT128__).
+ // PS4 does not support __float128 as of SDK 5.500 https://ps4.siedev.net/resources/documents/SDK/5.500/CPU_Compiler_ABI-Overview/0003.html
+ #ifndef EA_FP128
+ #if (defined __FLOAT128__ || defined _GLIBCXX_USE_FLOAT128) && !defined(EA_PLATFORM_SONY)
+ #define EA_FP128 1
+ #else
+ #define EA_FP128 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_ABM
+ // EA_ABM may be used to determine if Advanced Bit Manipulation sets are available for the target architecture (POPCNT, LZCNT)
+ //
+ #ifndef EA_ABM
+ #if defined(__ABM__) || defined(EA_PLATFORM_XBOXONE) || defined(EA_PLATFORM_SONY) || defined(CS_UNDEFINED_STRING)
+ #define EA_ABM 1
+ #else
+ #define EA_ABM 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_NEON
+ // EA_NEON may be used to determine if NEON is supported.
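+ //
+ // Example usage (an illustrative sketch):
+ // #if EA_NEON
+ // #include <arm_neon.h> // NEON intrinsics may be used here.
+ // #endif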
+ #ifndef EA_NEON
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
+ #define EA_NEON 1
+ #else
+ #define EA_NEON 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_BMI
+ // EA_BMI may be used to determine if Bit Manipulation Instruction sets are available for the target architecture
+ //
+ // EA_BMI defines the level of BMI support:
+ // 0 indicates no BMI support
+ // 1 indicates BMI1 is supported
+ // 2 indicates BMI2 is supported
+ #ifndef EA_BMI
+ #if defined(__BMI2__)
+ #define EA_BMI 2
+ #elif defined(__BMI__) || defined(EA_PLATFORM_XBOXONE) || defined(CS_UNDEFINED_STRING)
+ #define EA_BMI 1
+ #else
+ #define EA_BMI 0
+ #endif
+ #endif
+ #ifndef EA_BMI2
+ #if EA_BMI >= 2
+ #define EA_BMI2 1
+ #else
+ #define EA_BMI2 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_FMA3
+ // EA_FMA3 may be used to determine if Fused Multiply Add operations are available for the target architecture
+ // __FMA__ is defined only by GCC, Clang, and ICC; MSVC only defines __AVX__ and __AVX2__
+ // FMA3 was introduced alongside AVX2 on Intel Haswell
+ // All AMD processors support FMA3 if AVX2 is also supported
+ //
+ // EA_FMA3 defines the level of FMA3 support:
+ // 0 indicates no FMA3 support
+ // 1 indicates FMA3 is supported
+ #ifndef EA_FMA3
+ #if defined(__FMA__) || EA_AVX2 >= 1
+ #define EA_FMA3 1
+ #else
+ #define EA_FMA3 0
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_TBM
+ // EA_TBM may be used to determine if Trailing Bit Manipulation instructions are available for the target architecture
+ #ifndef EA_TBM
+ #if defined(__TBM__)
+ #define EA_TBM 1
+ #else
+ #define EA_TBM 0
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_IMPORT
+ // import declaration specification
+ // specifies that the declared symbol is imported from another dynamic library.
+ #ifndef EA_IMPORT
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_IMPORT __declspec(dllimport)
+ #else
+ #define EA_IMPORT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_EXPORT
+ // export declaration specification
+ // specifies that the declared symbol is exported from the current dynamic library.
+ // this is not the same as the C++ export keyword. The C++ export keyword has been
+ // removed from the language as of C++11.
+ #ifndef EA_EXPORT
+ #if defined(EA_COMPILER_MSVC)
+ #define EA_EXPORT __declspec(dllexport)
+ #else
+ #define EA_EXPORT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PRAGMA_ONCE_SUPPORTED
+ //
+ // This is a wrapper for the #pragma once preprocessor directive.
+ // It allows for some compilers (in particular VC++) to implement significantly
+ // faster include file preprocessing. #pragma once can be used to replace
+ // header include guards or to augment them. However, #pragma once isn't
+ // necessarily supported by all compilers and isn't guaranteed to be so in
+ // the future, so using #pragma once to replace traditional include guards
+ // is not strictly portable. Note that a direct #define for #pragma once is
+ // impossible with VC++, due to limitations, but can be done with other
+ // compilers/preprocessors via _Pragma("once").
+ // + // Example usage (which includes traditional header guards for portability): + // #ifndef SOMEPACKAGE_SOMEHEADER_H + // #define SOMEPACKAGE_SOMEHEADER_H + // + // #if defined(EA_PRAGMA_ONCE_SUPPORTED) + // #pragma once + // #endif + // + // + // + // #endif + // + #if defined(_MSC_VER) || defined(__GNUC__) || defined(__EDG__) || defined(__APPLE__) + #define EA_PRAGMA_ONCE_SUPPORTED 1 + #endif + + + + // ------------------------------------------------------------------------ + // EA_ONCE + // + // Example usage (which includes traditional header guards for portability): + // #ifndef SOMEPACKAGE_SOMEHEADER_H + // #define SOMEPACKAGE_SOMEHEADER_H + // + // EA_ONCE() + // + // + // + // #endif + // + #if defined(EA_PRAGMA_ONCE_SUPPORTED) + #if defined(_MSC_VER) + #define EA_ONCE() __pragma(once) + #else + #define EA_ONCE() // _Pragma("once") It turns out that _Pragma("once") isn't supported by many compilers. + #endif + #endif + + + + // ------------------------------------------------------------------------ + // EA_OVERRIDE + // + // C++11 override + // See http://msdn.microsoft.com/en-us/library/jj678987.aspx for more information. + // You can use EA_FINAL_OVERRIDE to combine usage of EA_OVERRIDE and EA_INHERITANCE_FINAL in a single statement. + // + // Example usage: + // struct B { virtual void f(int); }; + // struct D : B { void f(int) EA_OVERRIDE; }; + // + #ifndef EA_OVERRIDE + #if defined(EA_COMPILER_NO_OVERRIDE) + #define EA_OVERRIDE + #else + #define EA_OVERRIDE override + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_INHERITANCE_FINAL + // + // Portably wraps the C++11 final specifier. + // See http://msdn.microsoft.com/en-us/library/jj678985.aspx for more information. + // You can use EA_FINAL_OVERRIDE to combine usage of EA_OVERRIDE and EA_INHERITANCE_FINAL in a single statement. + // This is not called EA_FINAL because that term is used within EA to denote debug/release/final builds. + // + // Example usage: + // struct B { virtual void f() EA_INHERITANCE_FINAL; }; + // + #ifndef EA_INHERITANCE_FINAL + #if defined(EA_COMPILER_NO_INHERITANCE_FINAL) + #define EA_INHERITANCE_FINAL + #elif (defined(_MSC_VER) && (EA_COMPILER_VERSION < 1700)) // Pre-VS2012 + #define EA_INHERITANCE_FINAL sealed + #else + #define EA_INHERITANCE_FINAL final + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_FINAL_OVERRIDE + // + // Portably wraps the C++11 override final specifiers combined. + // + // Example usage: + // struct A { virtual void f(); }; + // struct B : public A { virtual void f() EA_FINAL_OVERRIDE; }; + // + #ifndef EA_FINAL_OVERRIDE + #define EA_FINAL_OVERRIDE EA_OVERRIDE EA_INHERITANCE_FINAL + #endif + + + // ------------------------------------------------------------------------ + // EA_SEALED + // + // This is deprecated, as the C++11 Standard has final (EA_INHERITANCE_FINAL) instead. + // See http://msdn.microsoft.com/en-us/library/0w2w91tf.aspx for more information. + // Example usage: + // struct B { virtual void f() EA_SEALED; }; + // + #ifndef EA_SEALED + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later + #define EA_SEALED sealed + #else + #define EA_SEALED + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_ABSTRACT + // + // This is a Microsoft language extension. + // See http://msdn.microsoft.com/en-us/library/b0z6b513.aspx for more information. 
+ // Example usage:
+ // struct X EA_ABSTRACT { virtual void f(){} };
+ //
+ #ifndef EA_ABSTRACT
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+ #define EA_ABSTRACT abstract
+ #else
+ #define EA_ABSTRACT
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CONSTEXPR
+ // EA_CONSTEXPR_OR_CONST
+ //
+ // Portable wrapper for C++11's 'constexpr' support.
+ //
+ // See http://www.cprogramming.com/c++11/c++11-compile-time-processing-with-constexpr.html for more information.
+ // Example usage:
+ // EA_CONSTEXPR int GetValue() { return 37; }
+ // EA_CONSTEXPR_OR_CONST double gValue = std::sin(kTwoPi);
+ //
+ #if !defined(EA_CONSTEXPR)
+ #if defined(EA_COMPILER_NO_CONSTEXPR)
+ #define EA_CONSTEXPR
+ #else
+ #define EA_CONSTEXPR constexpr
+ #endif
+ #endif
+
+ #if !defined(EA_CONSTEXPR_OR_CONST)
+ #if defined(EA_COMPILER_NO_CONSTEXPR)
+ #define EA_CONSTEXPR_OR_CONST const
+ #else
+ #define EA_CONSTEXPR_OR_CONST constexpr
+ #endif
+ #endif
+
+ // ------------------------------------------------------------------------
+ // EA_CONSTEXPR_IF
+ //
+ // Portable wrapper for C++17's 'constexpr if' support.
+ //
+ // https://en.cppreference.com/w/cpp/language/if
+ //
+ // Example usage:
+ //
+ // EA_CONSTEXPR_IF(eastl::is_copy_constructible_v<T>)
+ // { ... }
+ //
+ #if !defined(EA_CONSTEXPR_IF)
+ #if defined(EA_COMPILER_NO_CONSTEXPR_IF)
+ #define EA_CONSTEXPR_IF(predicate) if ((predicate))
+ #else
+ #define EA_CONSTEXPR_IF(predicate) if constexpr ((predicate))
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_EXTERN_TEMPLATE
+ //
+ // Portable wrapper for C++11's 'extern template' support.
+ //
+ // Example usage:
+ // EA_EXTERN_TEMPLATE(class basic_string<char>);
+ //
+ #if !defined(EA_EXTERN_TEMPLATE)
+ #if defined(EA_COMPILER_NO_EXTERN_TEMPLATE)
+ #define EA_EXTERN_TEMPLATE(declaration)
+ #else
+ #define EA_EXTERN_TEMPLATE(declaration) extern template declaration
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NOEXCEPT
+ // EA_NOEXCEPT_IF(predicate)
+ // EA_NOEXCEPT_EXPR(expression)
+ //
+ // Portable wrapper for C++11 noexcept
+ // http://en.cppreference.com/w/cpp/language/noexcept
+ // http://en.cppreference.com/w/cpp/language/noexcept_spec
+ //
+ // Example usage:
+ // EA_NOEXCEPT
+ // EA_NOEXCEPT_IF(predicate)
+ // EA_NOEXCEPT_EXPR(expression)
+ //
+ // This function never throws an exception.
+ // void DoNothing() EA_NOEXCEPT
+ // { }
+ //
+ // This function throws an exception if T::T() throws an exception.
+ // template <class T>
+ // void DoNothing() EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(T()))
+ // { T t; }
+ //
+ #if !defined(EA_NOEXCEPT)
+ #if defined(EA_COMPILER_NO_NOEXCEPT)
+ #define EA_NOEXCEPT
+ #define EA_NOEXCEPT_IF(predicate)
+ #define EA_NOEXCEPT_EXPR(expression) false
+ #else
+ #define EA_NOEXCEPT noexcept
+ #define EA_NOEXCEPT_IF(predicate) noexcept((predicate))
+ #define EA_NOEXCEPT_EXPR(expression) noexcept((expression))
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NORETURN
+ //
+ // Wraps the C++11 noreturn attribute. See EA_COMPILER_NO_NORETURN
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://msdn.microsoft.com/en-us/library/k6ktzx3s%28v=vs.80%29.aspx
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ // Example usage:
+ // EA_NORETURN void SomeFunction()
+ // { throw "error"; }
+ //
+ #if !defined(EA_NORETURN)
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300) // VS2003 (VC7) and later
+ #define EA_NORETURN __declspec(noreturn)
+ #elif defined(EA_COMPILER_NO_NORETURN)
+ #define EA_NORETURN
+ #else
+ #define EA_NORETURN [[noreturn]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_CARRIES_DEPENDENCY
+ //
+ // Wraps the C++11 carries_dependency attribute
+ // http://en.cppreference.com/w/cpp/language/attributes
+ // http://blog.aaronballman.com/2011/09/understanding-attributes/
+ //
+ // Example usage:
+ // EA_CARRIES_DEPENDENCY int* SomeFunction()
+ // { return &mX; }
+ //
+ //
+ #if !defined(EA_CARRIES_DEPENDENCY)
+ #if defined(EA_COMPILER_NO_CARRIES_DEPENDENCY)
+ #define EA_CARRIES_DEPENDENCY
+ #else
+ #define EA_CARRIES_DEPENDENCY [[carries_dependency]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FALLTHROUGH
+ //
+ // [[fallthrough]] is a C++17 standard attribute that appears in switch
+ // statements to indicate that the fallthrough from the previous case in the
+ // switch statement is intentional and not a bug.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ // void f(int n)
+ // {
+ // switch(n)
+ // {
+ // case 1:
+ // DoCase1();
+ // // Compiler may generate a warning for fallthrough behaviour
+ //
+ // case 2:
+ // DoCase2();
+ //
+ // EA_FALLTHROUGH;
+ // case 3:
+ // DoCase3();
+ // }
+ // }
+ //
+ #if !defined(EA_FALLTHROUGH)
+ #if defined(EA_COMPILER_NO_FALLTHROUGH)
+ #define EA_FALLTHROUGH
+ #else
+ #define EA_FALLTHROUGH [[fallthrough]]
+ #endif
+ #endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_NODISCARD
+ //
+ // [[nodiscard]] is a C++17 standard attribute that can be applied to a
+ // function declaration, enum, or class declaration. If any of these is
+ // returned from a function (without the user explicitly casting to void),
+ // the [[nodiscard]] attribute encourages the compiler to generate a warning
+ // about the user discarding the return value. This is a useful practice to
+ // encourage client code to check API error codes.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ //
+ // EA_NODISCARD int baz() { return 42; }
+ //
+ // void foo()
+ // {
+ // baz(); // warning: ignoring return value of function declared with 'nodiscard' attribute
+ // }
+ //
+ #if !defined(EA_NODISCARD)
+ #if defined(EA_COMPILER_NO_NODISCARD)
+ #define EA_NODISCARD
+ #else
+ #define EA_NODISCARD [[nodiscard]]
+ #endif
+ #endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_MAYBE_UNUSED
+ //
+ // [[maybe_unused]] is a C++17 standard attribute that suppresses warnings
+ // on unused entities that are declared as maybe_unused.
+ //
+ // http://en.cppreference.com/w/cpp/language/attributes
+ //
+ // Example usage:
+ // void foo(EA_MAYBE_UNUSED int i)
+ // {
+ // assert(i == 42); // warning suppressed when asserts disabled.
+    // ------------------------------------------------------------------------
+    // EA_NO_UBSAN
+    //
+    // The LLVM/Clang undefined behaviour sanitizer will not analyse a function tagged with the following attribute.
+    //
+    // https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#disabling-instrumentation-with-attribute-no-sanitize-undefined
+    //
+    // Example usage:
+    //     EA_NO_UBSAN int SomeFunction() { ... }
+    //
+    #ifndef EA_NO_UBSAN
+        #if defined(EA_COMPILER_CLANG)
+            #define EA_NO_UBSAN __attribute__((no_sanitize("undefined")))
+        #else
+            #define EA_NO_UBSAN
+        #endif
+    #endif
+
+
+    // ------------------------------------------------------------------------
+    // EA_NO_ASAN
+    //
+    // The LLVM/Clang address sanitizer will not analyse a function tagged with the following attribute.
+    //
+    // https://clang.llvm.org/docs/AddressSanitizer.html#disabling-instrumentation-with-attribute-no-sanitize-address
+    //
+    // Example usage:
+    //     EA_NO_ASAN int SomeFunction() { ... }
+    //
+    #ifndef EA_NO_ASAN
+        #if defined(EA_COMPILER_CLANG)
+            #define EA_NO_ASAN __attribute__((no_sanitize("address")))
+        #else
+            #define EA_NO_ASAN
+        #endif
+    #endif
+
+
+    // ------------------------------------------------------------------------
+    // EA_ASAN_ENABLED
+    //
+    // Defined as 0 or 1. Its value depends on the compile environment.
+    // Specifies whether the code is being built with Clang's Address Sanitizer.
+    //
+    #if defined(__has_feature)
+        #if __has_feature(address_sanitizer)
+            #define EA_ASAN_ENABLED 1
+        #else
+            #define EA_ASAN_ENABLED 0
+        #endif
+    #else
+        #define EA_ASAN_ENABLED 0
+    #endif
+
+    /////////////////////////////////////////////////////////////////////////////////
+    //
+    // EA_UBSAN_ENABLED
+    //
+    // Defined as 0 or 1. Its value depends on the compile environment.
+    // Specifies whether the code is being built with the Undefined Behavior Sanitizer.
+    //
+    #if EA_COMPILER_HAS_FEATURE(undefined_behavior_sanitizer)
+        #define EA_UBSAN_ENABLED 1
+    #else
+        #define EA_UBSAN_ENABLED 0
+    #endif
+
+    /////////////////////////////////////////////////////////////////////////////////
+    //
+    // EA_MSAN_ENABLED
+    //
+    // Defined as 0 or 1. Its value depends on the compile environment.
+    // Specifies whether the code is being built with Memory Sanitizer.
+    //
+    // MSAN documentation:
+    // https://clang.llvm.org/docs/MemorySanitizer.html
+    // https://github.com/google/sanitizers/wiki/MemorySanitizer
+    //
+    #if EA_COMPILER_HAS_FEATURE(memory_sanitizer)
+        #define EA_MSAN_ENABLED 1
+    #else
+        #define EA_MSAN_ENABLED 0
+    #endif
+
+    // ------------------------------------------------------------------------
+    // EA_NON_COPYABLE
+    //
+    // This macro defines a class as not being copy-constructible
+    // or assignable. This is useful for preventing class instances
+    // from being passed to functions by value, is useful for preventing
+    // compiler warnings by some compilers about the inability to
+    // auto-generate a copy constructor and assignment, and is useful
+    // for simply declaring in the interface that copy semantics are
+    // not supported by the class. Your class needs to have at least a
+    // default constructor when using this macro.
+    //
+    // Beware that this macro works by declaring a private: section of
+    // the class in the case of compilers that don't support C++11 deleted
+    // functions.
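One plausible way the sanitizer controls above combine in practice; a sketch only, with hypothetical names (ChecksumUnrolled, ReportBuildFlavor):

    #include <cstdio>
    #include <EABase/eabase.h>

    // Exempt a hand-tuned routine from ASan instrumentation (e.g. code whose
    // deliberate, benign access patterns would otherwise be flagged).
    EA_NO_ASAN unsigned ChecksumUnrolled(const unsigned char* p, int len)
    {
        unsigned sum = 0;
        for (int i = 0; i < len; ++i)
            sum += p[i];
        return sum;
    }

    void ReportBuildFlavor()
    {
    #if EA_ASAN_ENABLED
        std::puts("built with AddressSanitizer"); // compiled into ASan builds only
    #endif
    }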
+ // + // Note: With some pre-C++11 compilers (e.g. Green Hills), you may need + // to manually define an instances of the hidden functions, even + // though they are not used. + // + // Example usage: + // class Widget { + // Widget(); + // . . . + // EA_NON_COPYABLE(Widget) + // }; + // + #if !defined(EA_NON_COPYABLE) + #if defined(EA_COMPILER_NO_DELETED_FUNCTIONS) + #define EA_NON_COPYABLE(EAClass_) \ + private: \ + EA_DISABLE_VC_WARNING(4822); /* local class member function does not have a body */ \ + EAClass_(const EAClass_&); \ + void operator=(const EAClass_&); \ + EA_RESTORE_VC_WARNING(); + #else + #define EA_NON_COPYABLE(EAClass_) \ + EA_DISABLE_VC_WARNING(4822); /* local class member function does not have a body */ \ + EAClass_(const EAClass_&) = delete; \ + void operator=(const EAClass_&) = delete; \ + EA_RESTORE_VC_WARNING(); + #endif + #endif + + + // ------------------------------------------------------------------------ + // EA_FUNCTION_DELETE + // + // Semi-portable way of specifying a deleted function which allows for + // cleaner code in class declarations. + // + // Example usage: + // + // class Example + // { + // private: // For portability with pre-C++11 compilers, make the function private. + // void foo() EA_FUNCTION_DELETE; + // }; + // + // Note: EA_FUNCTION_DELETE'd functions should be private to prevent the + // functions from being called even when the compiler does not support + // deleted functions. Some compilers (e.g. Green Hills) that don't support + // C++11 deleted functions can require that you define the function, + // which you can do in the associated source file for the class. + // + #if defined(EA_COMPILER_NO_DELETED_FUNCTIONS) + #define EA_FUNCTION_DELETE + #else + #define EA_FUNCTION_DELETE = delete + #endif + + // ------------------------------------------------------------------------ + // EA_DISABLE_DEFAULT_CTOR + // + // Disables the compiler generated default constructor. This macro is + // provided to improve portability and clarify intent of code. + // + // Example usage: + // + // class Example + // { + // private: + // EA_DISABLE_DEFAULT_CTOR(Example); + // }; + // + #define EA_DISABLE_DEFAULT_CTOR(ClassName) ClassName() EA_FUNCTION_DELETE + + // ------------------------------------------------------------------------ + // EA_DISABLE_COPY_CTOR + // + // Disables the compiler generated copy constructor. This macro is + // provided to improve portability and clarify intent of code. + // + // Example usage: + // + // class Example + // { + // private: + // EA_DISABLE_COPY_CTOR(Example); + // }; + // + #define EA_DISABLE_COPY_CTOR(ClassName) ClassName(const ClassName &) EA_FUNCTION_DELETE + + // ------------------------------------------------------------------------ + // EA_DISABLE_MOVE_CTOR + // + // Disables the compiler generated move constructor. This macro is + // provided to improve portability and clarify intent of code. + // + // Example usage: + // + // class Example + // { + // private: + // EA_DISABLE_MOVE_CTOR(Example); + // }; + // + #define EA_DISABLE_MOVE_CTOR(ClassName) ClassName(ClassName&&) EA_FUNCTION_DELETE + + // ------------------------------------------------------------------------ + // EA_DISABLE_ASSIGNMENT_OPERATOR + // + // Disables the compiler generated assignment operator. This macro is + // provided to improve portability and clarify intent of code. 
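The constructor-disabling macros defined above compose naturally; a handle-style type typically removes defaulting, copying, and moving in one private block. A brief illustrative sketch (FileHandle is a hypothetical name, not part of EABase):

    #include <EABase/eabase.h>

    class FileHandle
    {
    public:
        explicit FileHandle(int fd) : mFd(fd) {}

    private:
        // Each macro expands to '= delete' on C++11 compilers and to a
        // private, unimplemented declaration on older ones.
        EA_DISABLE_DEFAULT_CTOR(FileHandle);
        EA_DISABLE_COPY_CTOR(FileHandle);
        EA_DISABLE_MOVE_CTOR(FileHandle);

        int mFd;
    };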
+    //
+    // Example usage:
+    //
+    //     class Example
+    //     {
+    //     private:
+    //         EA_DISABLE_ASSIGNMENT_OPERATOR(Example);
+    //     };
+    //
+    #define EA_DISABLE_ASSIGNMENT_OPERATOR(ClassName) ClassName & operator=(const ClassName &) EA_FUNCTION_DELETE
+
+    // ------------------------------------------------------------------------
+    // EA_DISABLE_MOVE_OPERATOR
+    //
+    // Disables the compiler generated move operator. This macro is
+    // provided to improve portability and clarify intent of code.
+    //
+    // Example usage:
+    //
+    //     class Example
+    //     {
+    //     private:
+    //         EA_DISABLE_MOVE_OPERATOR(Example);
+    //     };
+    //
+    #define EA_DISABLE_MOVE_OPERATOR(ClassName) ClassName & operator=(ClassName&&) EA_FUNCTION_DELETE
+
+    // ------------------------------------------------------------------------
+    // EANonCopyable
+    //
+    // Declares a class as not supporting copy construction or assignment.
+    // May be more reliable in some situations than EA_NON_COPYABLE alone,
+    // though it may result in more code generation.
+    //
+    // Note that VC++ will generate warnings C4625 and C4626 if you use EANonCopyable
+    // and you are compiling with /W4 and /Wall. There is no resolution but
+    // to redeclare EA_NON_COPYABLE in your subclass or disable the warnings with
+    // code like this:
+    //     EA_DISABLE_VC_WARNING(4625 4626)
+    //     ...
+    //     EA_RESTORE_VC_WARNING()
+    //
+    // Example usage:
+    //     struct Widget : EANonCopyable {
+    //        . . .
+    //     };
+    //
+    #ifdef __cplusplus
+        struct EANonCopyable
+        {
+            #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) || defined(__EDG__)
+                // EDG doesn't appear to behave properly for the case of defaulted constructors;
+                // it generates a mistaken warning about missing default constructors.
+                EANonCopyable() {}  // Putting {} here has the downside that it allows a class to create itself,
+                ~EANonCopyable() {} // but avoids linker errors that can occur with some compilers (e.g. Green Hills).
+            #else
+                EANonCopyable() = default;
+                ~EANonCopyable() = default;
+            #endif
+
+            EA_NON_COPYABLE(EANonCopyable)
+        };
+    #endif
+
+
+    // ------------------------------------------------------------------------
+    // EA_OPTIMIZE_OFF / EA_OPTIMIZE_ON
+    //
+    // Implements portable inline optimization enabling/disabling.
+    // Usage of these macros must be in the order OFF then ON. This is
+    // because the OFF macro pushes a set of settings and the ON
+    // macro pops them. The nesting of OFF/ON sets (e.g. OFF, OFF, ON, ON)
+    // is not guaranteed to work on all platforms.
+    //
+    // This is often used to allow debugging of some code that's
+    // otherwise compiled with undebuggable optimizations. It's also
+    // useful for working around compiler code generation problems
+    // that occur in optimized builds.
+    //
+    // Some compilers (e.g. VC++) don't allow doing this within a function and
+    // so the usage must be outside a function, as with the example below.
+    // GCC on x86 appears to have some problem with argument passing when
+    // using EA_OPTIMIZE_OFF in optimized builds.
+    //
+    // Example usage:
+    //     // Disable optimizations for SomeFunction.
+    //     EA_OPTIMIZE_OFF()
+    //     void SomeFunction()
+    //     {
+    //         ...
+    //     }
+    //     EA_OPTIMIZE_ON()
+    //
+    #if !defined(EA_OPTIMIZE_OFF)
+        #if defined(EA_COMPILER_MSVC)
+            #define EA_OPTIMIZE_OFF() __pragma(optimize("", off))
+        #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION > 4004) && (defined(__i386__) || defined(__x86_64__)) // GCC 4.4+ - Seems to work only on x86/Linux so far. However, GCC 4.4 itself appears broken and screws up parameter passing conventions.
+ #define EA_OPTIMIZE_OFF() \ + _Pragma("GCC push_options") \ + _Pragma("GCC optimize 0") + #elif defined(EA_COMPILER_CLANG) && (!defined(EA_PLATFORM_ANDROID) || (EA_COMPILER_VERSION >= 380)) + #define EA_OPTIMIZE_OFF() \ + EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas) \ + _Pragma("clang optimize off") \ + EA_RESTORE_CLANG_WARNING() + #else + #define EA_OPTIMIZE_OFF() + #endif + #endif + + #if !defined(EA_OPTIMIZE_ON) + #if defined(EA_COMPILER_MSVC) + #define EA_OPTIMIZE_ON() __pragma(optimize("", on)) + #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION > 4004) && (defined(__i386__) || defined(__x86_64__)) // GCC 4.4+ - Seems to work only on x86/Linux so far. However, GCC 4.4 itself appears broken and screws up parameter passing conventions. + #define EA_OPTIMIZE_ON() _Pragma("GCC pop_options") + #elif defined(EA_COMPILER_CLANG) && (!defined(EA_PLATFORM_ANDROID) || (EA_COMPILER_VERSION >= 380)) + #define EA_OPTIMIZE_ON() \ + EA_DISABLE_CLANG_WARNING(-Wunknown-pragmas) \ + _Pragma("clang optimize on") \ + EA_RESTORE_CLANG_WARNING() + #else + #define EA_OPTIMIZE_ON() + #endif + #endif + + + + // ------------------------------------------------------------------------ + // EA_SIGNED_RIGHT_SHIFT_IS_UNSIGNED + // + // Defined if right shifts of signed integers (i.e. arithmetic shifts) fail + // to propogate the high bit downward, and thus preserve sign. Most hardware + // and their corresponding compilers do this. + // + // + +#endif // Header include guard + + + + + + + + + + diff --git a/external/EASTL/include/EABase/config/eaplatform.h b/external/EASTL/include/EABase/config/eaplatform.h new file mode 100644 index 00000000..cc297ecf --- /dev/null +++ b/external/EASTL/include/EABase/config/eaplatform.h @@ -0,0 +1,742 @@ +/*----------------------------------------------------------------------------- + * config/eaplatform.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. + *----------------------------------------------------------------------------- + * Currently supported platform indentification defines include: + */ +#ifdef EA_PLATFORM_PS4 // ifdef for code stripping purposes +// EA_PLATFORM_PS4 (EA_PLATFORM_KETTLE) +#endif +#ifdef EA_PLATFORM_XBOXONE // ifdef for code stripping purposes + // EA_PLATFORM_XBOXONE (EA_PLATFORM_CAPILANO) + // EA_PLATFORM_XBOXONE_XDK (EA_PLATFORM_CAPILANO_XDK), set by capilano_config package + // EA_PLATFORM_XBOXONE_ADK (EA_PLATFORM_CAPILANO_ADK), set by capilano_config package +#endif +// EA_PLATFORM_ANDROID +// EA_PLATFORM_APPLE +// EA_PLATFORM_IPHONE +// EA_PLATFORM_IPHONE_SIMULATOR +// EA_PLATFORM_OSX +// EA_PLATFORM_LINUX +// EA_PLATFORM_SAMSUNG_TV +// EA_PLATFORM_WINDOWS +// EA_PLATFORM_WIN32 +// EA_PLATFORM_WIN64 +// EA_PLATFORM_WINDOWS_PHONE +// EA_PLATFORM_WINRT +// EA_PLATFORM_SUN +// EA_PLATFORM_LRB (Larrabee) +// EA_PLATFORM_POSIX (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX, EA_PLATFORM_UNIX, EA_PLATFORM_QNX) +// EA_PLATFORM_UNIX (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX) +// EA_PLATFORM_CYGWIN (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX) +// EA_PLATFORM_MINGW (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS) +// EA_PLATFORM_MICROSOFT (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS) +// +// EA_ABI_ARM_LINUX (a.k.a. "eabi". 
for all platforms that use the CodeSourcery GNU/Linux toolchain, like Android) +// EA_ABI_ARM_APPLE (similar to eabi but not identical) +// EA_ABI_ARM64_APPLE (similar to eabi but not identical) https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html +// EA_ABI_ARM_WINCE (similar to eabi but not identical) +// +// Other definitions emanated from this file inclue: +// EA_PLATFORM_NAME = +// EA_PLATFORM_DESCRIPTION = +// EA_PROCESSOR_XXX +// EA_MISALIGNED_SUPPORT_LEVEL=0|1|2 +// EA_SYSTEM_LITTLE_ENDIAN | EA_SYSTEM_BIG_ENDIAN +// EA_ASM_STYLE_ATT | EA_ASM_STYLE_INTEL | EA_ASM_STYLE_MOTOROLA +// EA_PLATFORM_PTR_SIZE = +// EA_PLATFORM_WORD_SIZE = +// EA_CACHE_LINE_SIZE = +//--------------------------------------------------------------------------- + +/* + EA_PLATFORM_MOBILE + EA_PLATFORM_MOBILE is a peer to EA_PLATORM_DESKTOP and EA_PLATFORM_CONSOLE. Their definition is qualitative rather + than quantitative, and refers to the general (usually weaker) capabilities of the machine. Mobile devices have a + similar set of weaknesses that are useful to generally categorize. The primary motivation is to avoid code that + tests for multiple mobile platforms on a line and needs to be updated every time we get a new one. + For example, mobile platforms tend to have weaker ARM processors, don't have full multiple processor support, + are hand-held, don't have mice (though may have touch screens or basic cursor controls), have writable solid + state permanent storage. Production user code shouldn't have too many expectations about the meaning of this define. + + EA_PLATFORM_DESKTOP + This is similar to EA_PLATFORM_MOBILE in its qualitative nature and refers to platforms that are powerful. + For example, they nearly always have virtual memory, mapped memory, hundreds of GB of writable disk storage, + TCP/IP network connections, mice, keyboards, 512+ MB of RAM, multiprocessing, multiple display support. + Production user code shouldn't have too many expectations about the meaning of this define. + + EA_PLATFORM_CONSOLE + This is similar to EA_PLATFORM_MOBILE in its qualitative nature and refers to platforms that are consoles. + This means platforms that are connected to TVs, are fairly powerful (especially graphics-wise), are tightly + controlled by vendors, tend not to have mapped memory, tend to have TCP/IP, don't have multiple process support + though they might have multiple CPUs, support TV output only. Production user code shouldn't have too many + expectations about the meaning of this define. + +*/ + + +#ifndef INCLUDED_eaplatform_H +#define INCLUDED_eaplatform_H + + +// Cygwin +// This is a pseudo-platform which will be defined along with EA_PLATFORM_LINUX when +// using the Cygwin build environment. +#if defined(__CYGWIN__) + #define EA_PLATFORM_CYGWIN 1 + #define EA_PLATFORM_DESKTOP 1 +#endif + +// MinGW +// This is a pseudo-platform which will be defined along with EA_PLATFORM_WINDOWS when +// using the MinGW Windows build environment. +#if defined(__MINGW32__) || defined(__MINGW64__) + #define EA_PLATFORM_MINGW 1 + #define EA_PLATFORM_DESKTOP 1 +#endif + +#if defined(EA_PLATFORM_PS4) || defined(__ORBIS__) || defined(EA_PLATFORM_KETTLE) + // PlayStation 4 + // Orbis was Sony's code-name for the platform, which is now obsolete. + // Kettle was an EA-specific code-name for the platform, which is now obsolete. 
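Downstream code normally consumes these detection results through the pseudo-platform macros described above rather than re-sniffing raw compiler defines. A sketch of typical usage (illustrative only; DefaultThreadCount is a hypothetical function, and the counts are placeholder policy, not EABase guidance):

    #include <EABase/eabase.h>

    int DefaultThreadCount()
    {
    #if defined(EA_PLATFORM_DESKTOP)
        return 8;   // desktops: assume plentiful cores
    #elif defined(EA_PLATFORM_CONSOLE)
        return 6;   // consoles: fixed, known hardware
    #elif defined(EA_PLATFORM_MOBILE)
        return 2;   // mobile: conserve power
    #else
        return 1;
    #endif
    }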
+ #if defined(EA_PLATFORM_PS4) + #undef EA_PLATFORM_PS4 + #endif + #define EA_PLATFORM_PS4 1 + + // Backward compatibility: + #if defined(EA_PLATFORM_KETTLE) + #undef EA_PLATFORM_KETTLE + #endif + // End backward compatbility + + #define EA_PLATFORM_KETTLE 1 + #define EA_PLATFORM_NAME "PS4" + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "PS4 on x64" + #define EA_PLATFORM_CONSOLE 1 + #define EA_PLATFORM_SONY 1 + #define EA_PLATFORM_POSIX 1 + // #define EA_POSIX_THREADS_AVAILABLE 1 // POSIX threading API is available but discouraged. Sony indicated use of the scePthreads* API is preferred. + #define EA_PROCESSOR_X86_64 1 + #if defined(__GNUC__) || defined(__clang__) + #define EA_ASM_STYLE_ATT 1 + #endif + +#elif defined(EA_PLATFORM_XBOXONE) || defined(_DURANGO) || defined(_XBOX_ONE) || defined(EA_PLATFORM_CAPILANO) || defined(_GAMING_XBOX) + // XBox One + // Durango was Microsoft's code-name for the platform, which is now obsolete. + // Microsoft uses _DURANGO instead of some variation of _XBOX, though it's not natively defined by the compiler. + // Capilano was an EA-specific code-name for the platform, which is now obsolete. + #if defined(EA_PLATFORM_XBOXONE) + #undef EA_PLATFORM_XBOXONE + #endif + #define EA_PLATFORM_XBOXONE 1 + + // Backward compatibility: + #if defined(EA_PLATFORM_CAPILANO) + #undef EA_PLATFORM_CAPILANO + #endif + #define EA_PLATFORM_CAPILANO 1 + #if defined(EA_PLATFORM_CAPILANO_XDK) && !defined(EA_PLATFORM_XBOXONE_XDK) + #define EA_PLATFORM_XBOXONE_XDK 1 + #endif + #if defined(EA_PLATFORM_CAPILANO_ADK) && !defined(EA_PLATFORM_XBOXONE_ADK) + #define EA_PLATFORM_XBOXONE_ADK 1 + #endif + // End backward compatibility + + #if !defined(_DURANGO) + #define _DURANGO + #endif + #define EA_PLATFORM_NAME "XBox One" + //#define EA_PROCESSOR_X86 Currently our policy is that we don't define this, even though x64 is something of a superset of x86. + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "XBox One on x64" + #define EA_ASM_STYLE_INTEL 1 + #define EA_PLATFORM_CONSOLE 1 + #define EA_PLATFORM_MICROSOFT 1 + + // WINAPI_FAMILY defines - mirrored from winapifamily.h + #define EA_WINAPI_FAMILY_APP 1000 + #define EA_WINAPI_FAMILY_DESKTOP_APP 1001 + #define EA_WINAPI_FAMILY_PHONE_APP 1002 + #define EA_WINAPI_FAMILY_TV_APP 1003 + #define EA_WINAPI_FAMILY_TV_TITLE 1004 + #define EA_WINAPI_FAMILY_GAMES 1006 + + #if defined(WINAPI_FAMILY) + #include + #if defined(WINAPI_FAMILY_TV_TITLE) && WINAPI_FAMILY == WINAPI_FAMILY_TV_TITLE + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_TV_TITLE + #elif defined(WINAPI_FAMILY_DESKTOP_APP) && WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP + #elif defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_GAMES + #else + #error Unsupported WINAPI_FAMILY + #endif + #else + #error WINAPI_FAMILY should always be defined on Capilano. + #endif + + // Macro to determine if a partition is enabled. 
+ #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition) + + #if EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_DESKTOP_APP + #define EA_WINAPI_PARTITION_CORE 1 + #define EA_WINAPI_PARTITION_DESKTOP 1 + #define EA_WINAPI_PARTITION_APP 1 + #define EA_WINAPI_PARTITION_PC_APP 0 + #define EA_WIANPI_PARTITION_PHONE 0 + #define EA_WINAPI_PARTITION_TV_APP 0 + #define EA_WINAPI_PARTITION_TV_TITLE 0 + #define EA_WINAPI_PARTITION_GAMES 0 + #elif EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_TV_TITLE + #define EA_WINAPI_PARTITION_CORE 1 + #define EA_WINAPI_PARTITION_DESKTOP 0 + #define EA_WINAPI_PARTITION_APP 0 + #define EA_WINAPI_PARTITION_PC_APP 0 + #define EA_WIANPI_PARTITION_PHONE 0 + #define EA_WINAPI_PARTITION_TV_APP 0 + #define EA_WINAPI_PARTITION_TV_TITLE 1 + #define EA_WINAPI_PARTITION_GAMES 0 + #elif EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_GAMES + #define EA_WINAPI_PARTITION_CORE 1 + #define EA_WINAPI_PARTITION_DESKTOP 0 + #define EA_WINAPI_PARTITION_APP 0 + #define EA_WINAPI_PARTITION_PC_APP 0 + #define EA_WIANPI_PARTITION_PHONE 0 + #define EA_WINAPI_PARTITION_TV_APP 0 + #define EA_WINAPI_PARTITION_TV_TITLE 0 + #define EA_WINAPI_PARTITION_GAMES 1 + #else + #error Unsupported WINAPI_FAMILY + #endif + + #if EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_GAMES) + #define CS_UNDEFINED_STRING 1 + #define CS_UNDEFINED_STRING 1 + #endif + + #if EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_TV_TITLE) + #define EA_PLATFORM_XBOXONE_XDK 1 + #endif +#elif defined(EA_PLATFORM_LRB) || defined(__LRB__) || (defined(__EDG__) && defined(__ICC) && defined(__x86_64__)) + #undef EA_PLATFORM_LRB + #define EA_PLATFORM_LRB 1 + #define EA_PLATFORM_NAME "Larrabee" + #define EA_PLATFORM_DESCRIPTION "Larrabee on LRB1" + #define EA_PROCESSOR_X86_64 1 + #if defined(BYTE_ORDER) && (BYTE_ORDER == 4321) + #define EA_SYSTEM_BIG_ENDIAN 1 + #else + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #endif + #define EA_PROCESSOR_LRB 1 + #define EA_PROCESSOR_LRB1 1 // Larrabee version 1 + #define EA_ASM_STYLE_ATT 1 // Both types of asm style + #define EA_ASM_STYLE_INTEL 1 // are supported. + #define EA_PLATFORM_DESKTOP 1 + +// Android (Google phone OS) +#elif defined(EA_PLATFORM_ANDROID) || defined(__ANDROID__) + #undef EA_PLATFORM_ANDROID + #define EA_PLATFORM_ANDROID 1 + #define EA_PLATFORM_LINUX 1 + #define EA_PLATFORM_UNIX 1 + #define EA_PLATFORM_POSIX 1 + #define EA_PLATFORM_NAME "Android" + #define EA_ASM_STYLE_ATT 1 + #if defined(__arm__) + #define EA_ABI_ARM_LINUX 1 // a.k.a. "ARM eabi" + #define EA_PROCESSOR_ARM32 1 + #define EA_PLATFORM_DESCRIPTION "Android on ARM" + #elif defined(__aarch64__) + #define EA_PROCESSOR_ARM64 1 + #define EA_PLATFORM_DESCRIPTION "Android on ARM64" + #elif defined(__i386__) + #define EA_PROCESSOR_X86 1 + #define EA_PLATFORM_DESCRIPTION "Android on x86" + #elif defined(__x86_64) + #define EA_PROCESSOR_X86_64 1 + #define EA_PLATFORM_DESCRIPTION "Android on x64" + #else + #error Unknown processor + #endif + #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN) + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #endif + #define EA_PLATFORM_MOBILE 1 + +// Samsung SMART TV - a Linux-based smart TV +#elif defined(EA_PLATFORM_SAMSUNG_TV) + #undef EA_PLATFORM_SAMSUNG_TV + #define EA_PLATFORM_SAMSUNG_TV 1 + #define EA_PLATFORM_LINUX 1 + #define EA_PLATFORM_UNIX 1 + #define EA_PLATFORM_POSIX 1 + #define EA_PLATFORM_NAME "SamsungTV" + #define EA_PLATFORM_DESCRIPTION "Samsung SMART TV on ARM" + #define EA_ASM_STYLE_ATT 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_ABI_ARM_LINUX 1 // a.k.a. 
"ARM eabi" + #define EA_PROCESSOR_ARM7 1 + +#elif defined(__APPLE__) && __APPLE__ + #include + + // Apple family of operating systems. + #define EA_PLATFORM_APPLE + #define EA_PLATFORM_POSIX 1 + + // iPhone + // TARGET_OS_IPHONE will be undefined on an unknown compiler, and will be defined on gcc. + #if defined(EA_PLATFORM_IPHONE) || defined(__IPHONE__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR) + #undef EA_PLATFORM_IPHONE + #define EA_PLATFORM_IPHONE 1 + #define EA_PLATFORM_NAME "iPhone" + #define EA_ASM_STYLE_ATT 1 + #define EA_POSIX_THREADS_AVAILABLE 1 + #if defined(__arm__) + #define EA_ABI_ARM_APPLE 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "iPhone on ARM" + #elif defined(__aarch64__) || defined(__AARCH64) + #define EA_ABI_ARM64_APPLE 1 + #define EA_PROCESSOR_ARM64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "iPhone on ARM64" + #elif defined(__i386__) + #define EA_PLATFORM_IPHONE_SIMULATOR 1 + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "iPhone simulator on x86" + #elif defined(__x86_64) || defined(__amd64) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "iPhone simulator on x64" + #else + #error Unknown processor + #endif + #define EA_PLATFORM_MOBILE 1 + + // Macintosh OSX + // TARGET_OS_MAC is defined by the Metrowerks and older AppleC compilers. + // Howerver, TARGET_OS_MAC is defined to be 1 in all cases. + // __i386__ and __intel__ are defined by the GCC compiler. + // __dest_os is defined by the Metrowerks compiler. + // __MACH__ is defined by the Metrowerks and GCC compilers. + // powerc and __powerc are defined by the Metrowerks and GCC compilers. + #elif defined(EA_PLATFORM_OSX) || defined(__MACH__) || (defined(__MSL__) && (__dest_os == __mac_os_x)) + #undef EA_PLATFORM_OSX + #define EA_PLATFORM_OSX 1 + #define EA_PLATFORM_UNIX 1 + #define EA_PLATFORM_POSIX 1 + //#define EA_PLATFORM_BSD 1 We don't currently define this. OSX has some BSD history but a lot of the API is different. 
+ #define EA_PLATFORM_NAME "OSX" + #if defined(__i386__) || defined(__intel__) + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on x86" + #elif defined(__x86_64) || defined(__amd64) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on x64" + #elif defined(__arm__) + #define EA_ABI_ARM_APPLE 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on ARM" + #elif defined(__aarch64__) || defined(__AARCH64) + #define EA_ABI_ARM64_APPLE 1 + #define EA_PROCESSOR_ARM64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on ARM64" + #elif defined(__POWERPC64__) || defined(__powerpc64__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_64 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC 64" + #elif defined(__POWERPC__) || defined(__powerpc__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_32 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC" + #else + #error Unknown processor + #endif + #if defined(__GNUC__) + #define EA_ASM_STYLE_ATT 1 + #else + #define EA_ASM_STYLE_MOTOROLA 1 + #endif + #define EA_PLATFORM_DESKTOP 1 + #else + #error Unknown Apple Platform + #endif + +// Linux +// __linux and __linux__ are defined by the GCC and Borland compiler. +// __i386__ and __intel__ are defined by the GCC compiler. +// __i386__ is defined by the Metrowerks compiler. +// _M_IX86 is defined by the Borland compiler. +// __sparc__ is defined by the GCC compiler. +// __powerpc__ is defined by the GCC compiler. +// __ARM_EABI__ is defined by GCC on an ARM v6l (Raspberry Pi 1) +// __ARM_ARCH_7A__ is defined by GCC on an ARM v7l (Raspberry Pi 2) +#elif defined(EA_PLATFORM_LINUX) || (defined(__linux) || defined(__linux__)) + #undef EA_PLATFORM_LINUX + #define EA_PLATFORM_LINUX 1 + #define EA_PLATFORM_UNIX 1 + #define EA_PLATFORM_POSIX 1 + #define EA_PLATFORM_NAME "Linux" + #if defined(__i386__) || defined(__intel__) || defined(_M_IX86) + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Linux on x86" + #elif defined(__ARM_ARCH_7A__) || defined(__ARM_EABI__) + #define EA_ABI_ARM_LINUX 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_PLATFORM_DESCRIPTION "Linux on ARM 6/7 32-bits" + #elif defined(__aarch64__) || defined(__AARCH64) + #define EA_PROCESSOR_ARM64 1 + #define EA_PLATFORM_DESCRIPTION "Linux on ARM64" + #elif defined(__x86_64__) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Linux on x64" + #elif defined(__powerpc64__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_64 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC 64" + #elif defined(__powerpc__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_32 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC" + #else + #error Unknown processor + #error Unknown endianness + #endif + #if defined(__GNUC__) + #define EA_ASM_STYLE_ATT 1 + #endif + #define EA_PLATFORM_DESKTOP 1 + + +#elif defined(EA_PLATFORM_BSD) || (defined(__BSD__) || defined(__FreeBSD__)) + #undef EA_PLATFORM_BSD + #define EA_PLATFORM_BSD 1 + #define EA_PLATFORM_UNIX 1 + #define EA_PLATFORM_POSIX 1 // BSD's posix complaince is not identical to Linux's + #define EA_PLATFORM_NAME "BSD Unix" + #if defined(__i386__) 
|| defined(__intel__) + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "BSD on x86" + #elif defined(__x86_64__) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "BSD on x64" + #elif defined(__powerpc64__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_64 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "BSD on PowerPC 64" + #elif defined(__powerpc__) + #define EA_PROCESSOR_POWERPC 1 + #define EA_PROCESSOR_POWERPC_32 1 + #define EA_SYSTEM_BIG_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "BSD on PowerPC" + #else + #error Unknown processor + #error Unknown endianness + #endif + #if !defined(EA_PLATFORM_FREEBSD) && defined(__FreeBSD__) + #define EA_PLATFORM_FREEBSD 1 // This is a variation of BSD. + #endif + #if defined(__GNUC__) + #define EA_ASM_STYLE_ATT 1 + #endif + #define EA_PLATFORM_DESKTOP 1 + + +#elif defined(EA_PLATFORM_WINDOWS_PHONE) + #undef EA_PLATFORM_WINDOWS_PHONE + #define EA_PLATFORM_WINDOWS_PHONE 1 + #define EA_PLATFORM_NAME "Windows Phone" + #if defined(_M_AMD64) || defined(_AMD64_) || defined(__x86_64__) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows Phone on x64" + #elif defined(_M_IX86) || defined(_X86_) + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows Phone on X86" + #elif defined(_M_ARM) + #define EA_ABI_ARM_WINCE 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows Phone on ARM" + #else //Possibly other Windows Phone variants + #error Unknown processor + #error Unknown endianness + #endif + #define EA_PLATFORM_MICROSOFT 1 + + // WINAPI_FAMILY defines - mirrored from winapifamily.h + #define EA_WINAPI_FAMILY_APP 1 + #define EA_WINAPI_FAMILY_DESKTOP_APP 2 + #define EA_WINAPI_FAMILY_PHONE_APP 3 + + #if defined(WINAPI_FAMILY) + #include + #if WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_PHONE_APP + #else + #error Unsupported WINAPI_FAMILY for Windows Phone + #endif + #else + #error WINAPI_FAMILY should always be defined on Windows Phone. + #endif + + // Macro to determine if a partition is enabled. + #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition) + + // Enable the appropriate partitions for the current family + #if EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_PHONE_APP + # define EA_WINAPI_PARTITION_CORE 1 + # define EA_WINAPI_PARTITION_PHONE 1 + # define EA_WINAPI_PARTITION_APP 1 + #else + # error Unsupported WINAPI_FAMILY for Windows Phone + #endif + + +// Windows +// _WIN32 is defined by the VC++, Intel and GCC compilers. +// _WIN64 is defined by the VC++, Intel and GCC compilers. +// __WIN32__ is defined by the Borland compiler. +// __INTEL__ is defined by the Metrowerks compiler. +// _M_IX86, _M_AMD64 and _M_IA64 are defined by the VC++, Intel, and Borland compilers. +// _X86_, _AMD64_, and _IA64_ are defined by the Metrowerks compiler. +// _M_ARM is defined by the VC++ compiler. +#elif (defined(EA_PLATFORM_WINDOWS) || (defined(_WIN32) || defined(__WIN32__) || defined(_WIN64))) && !defined(CS_UNDEFINED_STRING) + #undef EA_PLATFORM_WINDOWS + #define EA_PLATFORM_WINDOWS 1 + #define EA_PLATFORM_NAME "Windows" + #ifdef _WIN64 // VC++ defines both _WIN32 and _WIN64 when compiling for Win64. 
+ #define EA_PLATFORM_WIN64 1 + #else + #define EA_PLATFORM_WIN32 1 + #endif + #if defined(_M_AMD64) || defined(_AMD64_) || defined(__x86_64__) + #define EA_PROCESSOR_X86_64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows on x64" + #elif defined(_M_IX86) || defined(_X86_) + #define EA_PROCESSOR_X86 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows on X86" + #elif defined(_M_IA64) || defined(_IA64_) + #define EA_PROCESSOR_IA64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows on IA-64" + #elif defined(_M_ARM) + #define EA_ABI_ARM_WINCE 1 + #define EA_PROCESSOR_ARM32 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows on ARM" + #elif defined(_M_ARM64) + #define EA_PROCESSOR_ARM64 1 + #define EA_SYSTEM_LITTLE_ENDIAN 1 + #define EA_PLATFORM_DESCRIPTION "Windows on ARM64" + #else //Possibly other Windows CE variants + #error Unknown processor + #error Unknown endianness + #endif + #if defined(__GNUC__) + #define EA_ASM_STYLE_ATT 1 + #elif defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) + #define EA_ASM_STYLE_INTEL 1 + #endif + #define EA_PLATFORM_DESKTOP 1 + #define EA_PLATFORM_MICROSOFT 1 + + #if defined(_KERNEL_MODE) + #define EA_PLATFORM_WINDOWS_KERNEL 1 + #endif + + // WINAPI_FAMILY defines to support Windows 8 Metro Apps - mirroring winapifamily.h in the Windows 8 SDK + #define EA_WINAPI_FAMILY_APP 1000 + #define EA_WINAPI_FAMILY_DESKTOP_APP 1001 + #define EA_WINAPI_FAMILY_GAMES 1006 + + #if defined(WINAPI_FAMILY) + #if defined(_MSC_VER) + #pragma warning(push, 0) + #endif + #include + #if defined(_MSC_VER) + #pragma warning(pop) + #endif + #if defined(WINAPI_FAMILY_DESKTOP_APP) && WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP + #elif defined(WINAPI_FAMILY_APP) && WINAPI_FAMILY == WINAPI_FAMILY_APP + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_APP + #elif defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_GAMES + #else + #error Unsupported WINAPI_FAMILY + #endif + #else + #define EA_WINAPI_FAMILY EA_WINAPI_FAMILY_DESKTOP_APP + #endif + + #define EA_WINAPI_PARTITION_DESKTOP 1 + #define EA_WINAPI_PARTITION_APP 1 + #define EA_WINAPI_PARTITION_GAMES (EA_WINAPI_FAMILY == EA_WINAPI_FAMILY_GAMES) + + #define EA_WINAPI_FAMILY_PARTITION(Partition) (Partition) + + // EA_PLATFORM_WINRT + // This is a subset of Windows which is used for tablets and the "Metro" (restricted) Windows user interface. + // WinRT doesn't doesn't have access to the Windows "desktop" API, but WinRT can nevertheless run on + // desktop computers in addition to tablets. The Windows Phone API is a subset of WinRT and is not included + // in it due to it being only a part of the API. + #if defined(__cplusplus_winrt) + #define EA_PLATFORM_WINRT 1 + #endif + +// Sun (Solaris) +// __SUNPRO_CC is defined by the Sun compiler. +// __sun is defined by the GCC compiler. +// __i386 is defined by the Sun and GCC compilers. +// __sparc is defined by the Sun and GCC compilers. +#else + #error Unknown platform + #error Unknown processor + #error Unknown endianness +#endif + +#ifndef EA_PROCESSOR_ARM + #if defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) || defined(EA_PROCESSOR_ARM7) + #define EA_PROCESSOR_ARM + #endif +#endif + +// EA_PLATFORM_PTR_SIZE +// Platform pointer size; same as sizeof(void*). 
+// This is not the same as sizeof(int), as int is usually 32 bits on +// even 64 bit platforms. +// +// _WIN64 is defined by Win64 compilers, such as VC++. +// _M_IA64 is defined by VC++ and Intel compilers for IA64 processors. +// __LP64__ is defined by HP compilers for the LP64 standard. +// _LP64 is defined by the GCC and Sun compilers for the LP64 standard. +// __ia64__ is defined by the GCC compiler for IA64 processors. +// __arch64__ is defined by the Sparc compiler for 64 bit processors. +// __mips64__ is defined by the GCC compiler for MIPS processors. +// __powerpc64__ is defined by the GCC compiler for PowerPC processors. +// __64BIT__ is defined by the AIX compiler for 64 bit processors. +// __sizeof_ptr is defined by the ARM compiler (armcc, armcpp). +// +#ifndef EA_PLATFORM_PTR_SIZE + #if defined(__WORDSIZE) // Defined by some variations of GCC. + #define EA_PLATFORM_PTR_SIZE ((__WORDSIZE) / 8) + #elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || defined(__ia64__) || defined(__arch64__) || defined(__aarch64__) || defined(__mips64__) || defined(__64BIT__) || defined(__Ptr_Is_64) + #define EA_PLATFORM_PTR_SIZE 8 + #elif defined(__CC_ARM) && (__sizeof_ptr == 8) + #define EA_PLATFORM_PTR_SIZE 8 + #else + #define EA_PLATFORM_PTR_SIZE 4 + #endif +#endif + + + +// EA_PLATFORM_WORD_SIZE +// This defines the size of a machine word. This will be the same as +// the size of registers on the machine but not necessarily the same +// as the size of pointers on the machine. A number of 64 bit platforms +// have 64 bit registers but 32 bit pointers. +// +#ifndef EA_PLATFORM_WORD_SIZE + #define EA_PLATFORM_WORD_SIZE EA_PLATFORM_PTR_SIZE +#endif + +// EA_PLATFORM_MIN_MALLOC_ALIGNMENT +// This defines the minimal alignment that the platform's malloc +// implementation will return. This should be used when writing custom +// allocators to ensure that the alignment matches that of malloc +#ifndef EA_PLATFORM_MIN_MALLOC_ALIGNMENT + #if defined(EA_PLATFORM_APPLE) + #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 16 + #elif defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_ARM) + #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 8 + #elif defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_X86_64) + #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT 8 + #else + #define EA_PLATFORM_MIN_MALLOC_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2) + #endif +#endif + + +// EA_MISALIGNED_SUPPORT_LEVEL +// Specifies if the processor can read and write built-in types that aren't +// naturally aligned. +// 0 - not supported. Likely causes an exception. +// 1 - supported but slow. +// 2 - supported and fast. +// +#ifndef EA_MISALIGNED_SUPPORT_LEVEL + #if defined(EA_PROCESSOR_X86_64) + #define EA_MISALIGNED_SUPPORT_LEVEL 2 + #else + #define EA_MISALIGNED_SUPPORT_LEVEL 0 + #endif +#endif + +// Macro to determine if a Windows API partition is enabled. Always false on non Microsoft platforms. +#if !defined(EA_WINAPI_FAMILY_PARTITION) + #define EA_WINAPI_FAMILY_PARTITION(Partition) (0) +#endif + + +// EA_CACHE_LINE_SIZE +// Specifies the cache line size broken down by compile target. +// This the expected best guess values for the targets that we can make at compilation time. + +#ifndef EA_CACHE_LINE_SIZE + #if defined(EA_PROCESSOR_X86) + #define EA_CACHE_LINE_SIZE 32 // This is the minimum possible value. 
+    #elif defined(EA_PROCESSOR_X86_64)
+        #define EA_CACHE_LINE_SIZE 64    // This is the minimum possible value
+    #elif defined(EA_PROCESSOR_ARM32)
+        #define EA_CACHE_LINE_SIZE 32    // This varies between implementations and is usually 32 or 64.
+    #elif defined(EA_PROCESSOR_ARM64)
+        #define EA_CACHE_LINE_SIZE 64    // Cache line of the Cortex-A8 (64 bytes) http://shervinemami.info/armAssembly.html however this remains largely an assumption at this stage
+    #elif (EA_PLATFORM_WORD_SIZE == 4)
+        #define EA_CACHE_LINE_SIZE 32    // This is the minimum possible value
+    #else
+        #define EA_CACHE_LINE_SIZE 64    // This is the minimum possible value
+    #endif
+#endif
+
+
+#endif // INCLUDED_eaplatform_H
+
+
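eaplatform.h above only detects; consuming projects often verify the derived constants at compile time and use them for layout decisions. An illustrative sketch using C++11 facilities directly (Counter is a hypothetical type, not part of either header):

    #include <EABase/eabase.h>

    // The deduced pointer size must agree with the compiler's reality.
    static_assert(EA_PLATFORM_PTR_SIZE == sizeof(void*), "EA_PLATFORM_PTR_SIZE mismatch");

    // Pad frequently-written shared state to a full cache line to avoid false sharing.
    struct alignas(EA_CACHE_LINE_SIZE) Counter
    {
        long value;
        char pad[EA_CACHE_LINE_SIZE - sizeof(long)];
    };
    static_assert(sizeof(Counter) == EA_CACHE_LINE_SIZE, "Counter should occupy exactly one cache line");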
diff --git a/external/EASTL/include/EABase/eabase.h b/external/EASTL/include/EABase/eabase.h
new file mode 100644
index 00000000..dab9e467
--- /dev/null
+++ b/external/EASTL/include/EABase/eabase.h
@@ -0,0 +1,1011 @@
+/*-----------------------------------------------------------------------------
+ * eabase.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eabase_H
+#define INCLUDED_eabase_H
+
+
+// Identify the compiler and declare the EA_COMPILER_xxxx defines
+#include <EABase/config/eacompiler.h>
+
+// Identify traits which this compiler supports, or does not support
+#include <EABase/config/eacompilertraits.h>
+
+// Identify the platform and declare the EA_xxxx defines
+#include <EABase/config/eaplatform.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+// Always include version.h for backwards compatibility.
+#include <EABase/version.h>
+
+// Define common SI unit macros
+#include <EABase/eaunits.h>
+
+
+// ------------------------------------------------------------------------
+// The C++ standard defines size_t as a built-in type. Some compilers are
+// not standards-compliant in this respect, so we need an additional include.
+// The case is similar with wchar_t under C++.
+
+#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_MSVC) || defined(EA_WCHAR_T_NON_NATIVE) || defined(EA_PLATFORM_SONY)
+    #if defined(EA_COMPILER_MSVC)
+        #pragma warning(push, 0)
+        #pragma warning(disable: 4265 4365 4836 4574)
+    #endif
+    #include <stddef.h>
+    #if defined(EA_COMPILER_MSVC)
+        #pragma warning(pop)
+    #endif
+#endif
+
+// ------------------------------------------------------------------------
+// Include stddef.h on Apple's clang compiler to ensure the ptrdiff_t type
+// is defined.
+#if defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_APPLE)
+    #include <stddef.h>
+#endif
+
+// ------------------------------------------------------------------------
+// Include assert.h on C11 supported compilers so we may allow static_assert usage
+// http://en.cppreference.com/w/c/error/static_assert
+// C11 standard (ISO/IEC 9899:2011):
+// 7.2/3 Diagnostics (p. 186)
+#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201100L
+    #include <assert.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// By default, GCC defines NULL as ((void*)0), which is the
+// C definition. This causes all sorts of problems for C++ code, so it is
+// worked around by undefining NULL.
+
+#if defined(NULL)
+    #undef NULL
+#endif
+
+
+// ------------------------------------------------------------------------
+// Define the NULL pointer. This is normally defined in <stddef.h>, but we
+// don't want to force a global dependency on that header, so the definition
+// is duplicated here.
+
+#if defined(__cplusplus)
+    #define NULL 0
+#else
+    #define NULL ((void*)0)
+#endif
+
+
+// ------------------------------------------------------------------------
+// C99 Standard typedefs. From the ANSI ISO/IEC 9899 standards document.
+// Most recent versions of the gcc-compiler come with these defined in
+// inttypes.h or stddef.h. Determining if they are predefined can be
+// tricky, so we expect some problems on non-standard compilers.
+
+//#if (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && !defined(PRId64)
+//    #error "<inttypes.h> was #included before eabase.h, but without __STDC_FORMAT_MACROS #defined. You must #include eabase.h or an equivalent before #including C99 headers, or you must define __STDC_FORMAT_MACROS before #including system headers."
+//#endif
+
+// ------------------------------------------------------------------------
+// We need to test this after we potentially include stddef.h, otherwise we
+// would have put this into the compilertraits header.
+#if !defined(EA_COMPILER_HAS_INTTYPES) && (!defined(_MSC_VER) || (_MSC_VER > 1500)) && (defined(EA_COMPILER_IS_C99) || defined(INT8_MIN) || defined(EA_COMPILER_HAS_C99_TYPES) || defined(_SN_STDINT_H))
+    #define EA_COMPILER_HAS_INTTYPES
+#endif
+
+#ifdef EA_COMPILER_HAS_INTTYPES // If the compiler supports inttypes...
+    // ------------------------------------------------------------------------
+    // Include the stdint header to define and derive the required types.
+    // Additionally include inttypes.h as many compilers, including variations
+    // of GCC, define things in inttypes.h that the C99 standard says go
+    // in stdint.h.
+    //
+    // The C99 standard specifies that inttypes.h only defines printf/scanf
+    // format macros if __STDC_FORMAT_MACROS is defined before #including
+    // inttypes.h. For consistency, we do that here.
+    #ifndef __STDC_FORMAT_MACROS
+        #define __STDC_FORMAT_MACROS
+    #endif
+    // The GCC PSP compiler defines standard int types (e.g. uint32_t) but not PRId8, etc.
+    // MSVC added support for the inttypes.h header in VS2013.
+    #if !defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1800)
+        #include <inttypes.h> // PRId8, SCNd8, etc.
+    #endif
+    #if defined(_MSC_VER)
+        #pragma warning(push, 0)
+    #endif
+    #include <stdint.h>   // int32_t, INT64_C, UINT8_MAX, etc.
+    #include <math.h>     // float_t, double_t, etc.
+    #include <float.h>    // FLT_EVAL_METHOD.
+    #if defined(_MSC_VER)
+        #pragma warning(pop)
+    #endif
+
+    #if !defined(FLT_EVAL_METHOD) && (defined(__FLT_EVAL_METHOD__) || defined(_FEVAL)) // GCC 3.x defines __FLT_EVAL_METHOD__ instead of the C99 standard FLT_EVAL_METHOD.
+        #ifdef __FLT_EVAL_METHOD__
+            #define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+        #else
+            #define FLT_EVAL_METHOD _FEVAL
+        #endif
+    #endif
+
+    // MinGW GCC (up to at least v4.3.0-20080502) mistakenly neglects to define float_t and double_t.
+    // This appears to be an acknowledged bug as of March 2008 and is scheduled to be fixed.
+    // Similarly, Android uses a mix of custom standard library headers which prior to SDK API level 21
+    // don't define float_t and double_t.
+ #if defined(__MINGW32__) || (defined(EA_PLATFORM_ANDROID) && !(defined(EA_ANDROID_SDK_LEVEL) && EA_ANDROID_SDK_LEVEL >= 21)) + #if defined(__FLT_EVAL_METHOD__) + #if(__FLT_EVAL_METHOD__== 0) + typedef float float_t; + typedef double double_t; + #elif(__FLT_EVAL_METHOD__ == 1) + typedef double float_t; + typedef double double_t; + #elif(__FLT_EVAL_METHOD__ == 2) + typedef long double float_t; + typedef long double double_t; + #endif + #else + typedef float float_t; + typedef double double_t; + #endif + #endif + + // The CodeSourcery definitions of PRIxPTR and SCNxPTR are broken for 32 bit systems. + #if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4) && (defined(__have_long64) || defined(__have_longlong64)) + #undef PRIdPTR + #define PRIdPTR "d" + #undef PRIiPTR + #define PRIiPTR "i" + #undef PRIoPTR + #define PRIoPTR "o" + #undef PRIuPTR + #define PRIuPTR "u" + #undef PRIxPTR + #define PRIxPTR "x" + #undef PRIXPTR + #define PRIXPTR "X" + + #undef SCNdPTR + #define SCNdPTR "d" + #undef SCNiPTR + #define SCNiPTR "i" + #undef SCNoPTR + #define SCNoPTR "o" + #undef SCNuPTR + #define SCNuPTR "u" + #undef SCNxPTR + #define SCNxPTR "x" + #endif +#else // else we must implement types ourselves. + + #if !defined(__BIT_TYPES_DEFINED__) && !defined(__int8_t_defined) + typedef signed char int8_t; //< 8 bit signed integer + #endif + #if !defined( __int8_t_defined ) + typedef signed short int16_t; //< 16 bit signed integer + typedef signed int int32_t; //< 32 bit signed integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed. + #define __int8_t_defined + #endif + typedef unsigned char uint8_t; //< 8 bit unsigned integer + typedef unsigned short uint16_t; //< 16 bit unsigned integer + #if !defined( __uint32_t_defined ) + typedef unsigned int uint32_t; //< 32 bit unsigned integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed. + #define __uint32_t_defined + #endif + + // According to the C98/99 standard, FLT_EVAL_METHOD defines control the + // width used for floating point _t types. + #if defined(_MSC_VER) && _MSC_VER >= 1800 + // MSVC's math.h provides float_t, double_t under this condition. + #elif defined(FLT_EVAL_METHOD) + #if (FLT_EVAL_METHOD == 0) + typedef float float_t; + typedef double double_t; + #elif (FLT_EVAL_METHOD == 1) + typedef double float_t; + typedef double double_t; + #elif (FLT_EVAL_METHOD == 2) + typedef long double float_t; + typedef long double double_t; + #endif + #endif + + #if defined(EA_COMPILER_MSVC) + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + + #else + typedef signed long long int64_t; + typedef unsigned long long uint64_t; + #endif +#endif + + +// ------------------------------------------------------------------------ +// macros for declaring constants in a portable way. +// +// e.g. int64_t x = INT64_C(1234567812345678); +// e.g. int64_t x = INT64_C(0x1111111122222222); +// e.g. uint64_t x = UINT64_C(0x1111111122222222); +// +// Microsoft VC++'s definitions of INT8_C/UINT8_C/INT16_C/UINT16_C are like so: +// #define INT8_C(x) (x) +// #define INT16_C(x) (x) +// #define UINT8_C(x) (x) +// #define UINT16_C(x) (x) +// To consider: undefine Microsoft's and use the casting versions below. +// ------------------------------------------------------------------------ + +#ifndef INT8_C_DEFINED // If the user hasn't already defined these... 
+ #define INT8_C_DEFINED + + #ifndef INT8_C + #define INT8_C(x) int8_t(x) // For the majority of compilers and platforms, long is 32 bits and long long is 64 bits. + #endif + #ifndef UINT8_C + #define UINT8_C(x) uint8_t(x) + #endif + #ifndef INT16_C + #define INT16_C(x) int16_t(x) + #endif + #ifndef UINT16_C + #define UINT16_C(x) uint16_t(x) // Possibly we should make this be uint16_t(x##u). Let's see how compilers react before changing this. + #endif + #ifndef INT32_C + #define INT32_C(x) x##L + #endif + #ifndef UINT32_C + #define UINT32_C(x) x##UL + #endif + #ifndef INT64_C + #define INT64_C(x) x##LL // The way to deal with this is to compare ULONG_MAX to 0xffffffff and if not equal, then remove the L. + #endif + #ifndef UINT64_C + #define UINT64_C(x) x##ULL // We need to follow a similar approach for LL. + #endif + #ifndef UINTMAX_C + #define UINTMAX_C(x) UINT64_C(x) + #endif +#endif + +// ------------------------------------------------------------------------ +// type sizes +#ifndef INT8_MAX_DEFINED // If the user hasn't already defined these... + #define INT8_MAX_DEFINED + + // The value must be 2^(n-1)-1 + #ifndef INT8_MAX + #define INT8_MAX 127 + #endif + #ifndef INT16_MAX + #define INT16_MAX 32767 + #endif + #ifndef INT32_MAX + #define INT32_MAX 2147483647 + #endif + #ifndef INT64_MAX + #define INT64_MAX INT64_C(9223372036854775807) + #endif + #ifndef INTMAX_MAX + #define INTMAX_MAX INT64_MAX + #endif + #ifndef INTPTR_MAX + #if EA_PLATFORM_PTR_SIZE == 4 + #define INTPTR_MAX INT32_MAX + #else + #define INTPTR_MAX INT64_MAX + #endif + #endif + + // The value must be either -2^(n-1) or 1-2(n-1). + #ifndef INT8_MIN + #define INT8_MIN -128 + #endif + #ifndef INT16_MIN + #define INT16_MIN -32768 + #endif + #ifndef INT32_MIN + #define INT32_MIN (-INT32_MAX - 1) // -2147483648 + #endif + #ifndef INT64_MIN + #define INT64_MIN (-INT64_MAX - 1) // -9223372036854775808 + #endif + #ifndef INTMAX_MIN + #define INTMAX_MIN INT64_MIN + #endif + #ifndef INTPTR_MIN + #if EA_PLATFORM_PTR_SIZE == 4 + #define INTPTR_MIN INT32_MIN + #else + #define INTPTR_MIN INT64_MIN + #endif + #endif + + // The value must be 2^n-1 + #ifndef UINT8_MAX + #define UINT8_MAX 0xffU // 255 + #endif + #ifndef UINT16_MAX + #define UINT16_MAX 0xffffU // 65535 + #endif + #ifndef UINT32_MAX + #define UINT32_MAX UINT32_C(0xffffffff) // 4294967295 + #endif + #ifndef UINT64_MAX + #define UINT64_MAX UINT64_C(0xffffffffffffffff) // 18446744073709551615 + #endif + #ifndef UINTMAX_MAX + #define UINTMAX_MAX UINT64_MAX + #endif + #ifndef UINTPTR_MAX + #if EA_PLATFORM_PTR_SIZE == 4 + #define UINTPTR_MAX UINT32_MAX + #else + #define UINTPTR_MAX UINT64_MAX + #endif + #endif +#endif + +#ifndef FLT_EVAL_METHOD + #define FLT_EVAL_METHOD 0 + typedef float float_t; + typedef double double_t; +#endif + +#if defined(EA_COMPILER_HAS_INTTYPES) && (!defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_MSVC) && EA_COMPILER_VERSION >= 1800)) + #define EA_COMPILER_HAS_C99_FORMAT_MACROS +#endif + +#ifndef EA_COMPILER_HAS_C99_FORMAT_MACROS + // ------------------------------------------------------------------------ + // sized printf and scanf format specifiers + // See the C99 standard, section 7.8.1 -- Macros for format specifiers. + // + // The C99 standard specifies that inttypes.h only define printf/scanf + // format macros if __STDC_FORMAT_MACROS is defined before #including + // inttypes.h. For consistency, we define both __STDC_FORMAT_MACROS and + // the printf format specifiers here. 
We also skip the "least/most" + // variations of these specifiers, as we've decided to do so with + // basic types. + // + // For 64 bit systems, we assume the LP64 standard is followed + // (as opposed to ILP64, etc.) For 32 bit systems, we assume the + // ILP32 standard is followed. See: + // http://www.opengroup.org/public/tech/aspen/lp64_wp.htm + // for information about this. Thus, on both 32 and 64 bit platforms, + // %l refers to 32 bit data while %ll refers to 64 bit data. + + #ifndef __STDC_FORMAT_MACROS + #define __STDC_FORMAT_MACROS + #endif + + #if defined(EA_COMPILER_MSVC) // VC++ 7.1+ understands long long as a data type but doesn't accept %ll as a printf specifier. + #define EA_PRI_64_LENGTH_SPECIFIER "I64" + #define EA_SCN_64_LENGTH_SPECIFIER "I64" + #else + #define EA_PRI_64_LENGTH_SPECIFIER "ll" + #define EA_SCN_64_LENGTH_SPECIFIER "ll" + #endif // It turns out that some platforms use %q to represent a 64 bit value, but these are not relevant to us at this time. + + // Printf format specifiers + #if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC) + #define PRId8 "hhd" + #define PRIi8 "hhi" + #define PRIo8 "hho" + #define PRIu8 "hhu" + #define PRIx8 "hhx" + #define PRIX8 "hhX" + #else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c. + #define PRId8 "c" // This may not work properly but it at least will not crash. Try using 16 bit versions instead. + #define PRIi8 "c" // " + #define PRIo8 "o" // " + #define PRIu8 "u" // " + #define PRIx8 "x" // " + #define PRIX8 "X" // " + #endif + + #define PRId16 "hd" + #define PRIi16 "hi" + #define PRIo16 "ho" + #define PRIu16 "hu" + #define PRIx16 "hx" + #define PRIX16 "hX" + + #define PRId32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions. + #define PRIi32 "i" + #define PRIo32 "o" + #define PRIu32 "u" + #define PRIx32 "x" + #define PRIX32 "X" + + #define PRId64 EA_PRI_64_LENGTH_SPECIFIER "d" + #define PRIi64 EA_PRI_64_LENGTH_SPECIFIER "i" + #define PRIo64 EA_PRI_64_LENGTH_SPECIFIER "o" + #define PRIu64 EA_PRI_64_LENGTH_SPECIFIER "u" + #define PRIx64 EA_PRI_64_LENGTH_SPECIFIER "x" + #define PRIX64 EA_PRI_64_LENGTH_SPECIFIER "X" + + #if (EA_PLATFORM_PTR_SIZE == 4) + #define PRIdPTR PRId32 // Usage of pointer values will generate warnings with + #define PRIiPTR PRIi32 // some compilers because they are defined in terms of + #define PRIoPTR PRIo32 // integers. However, you can't simply use "p" because + #define PRIuPTR PRIu32 // 'p' is interpreted in a specific and often different + #define PRIxPTR PRIx32 // way by the library. + #define PRIXPTR PRIX32 + #elif (EA_PLATFORM_PTR_SIZE == 8) + #define PRIdPTR PRId64 + #define PRIiPTR PRIi64 + #define PRIoPTR PRIo64 + #define PRIuPTR PRIu64 + #define PRIxPTR PRIx64 + #define PRIXPTR PRIX64 + #endif + + // Scanf format specifiers + #if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC) + #define SCNd8 "hhd" + #define SCNi8 "hhi" + #define SCNo8 "hho" + #define SCNu8 "hhu" + #define SCNx8 "hhx" + #else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c. + #define SCNd8 "c" // This will not work properly but it at least will not crash. Try using 16 bit versions instead. + #define SCNi8 "c" // " + #define SCNo8 "c" // " + #define SCNu8 "c" // " + #define SCNx8 "c" // " + #endif + + #define SCNd16 "hd" + #define SCNi16 "hi" + #define SCNo16 "ho" + #define SCNu16 "hu" + #define SCNx16 "hx" + + #define SCNd32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions. 
+ #define SCNi32 "i" + #define SCNo32 "o" + #define SCNu32 "u" + #define SCNx32 "x" + + #define SCNd64 EA_SCN_64_LENGTH_SPECIFIER "d" + #define SCNi64 EA_SCN_64_LENGTH_SPECIFIER "i" + #define SCNo64 EA_SCN_64_LENGTH_SPECIFIER "o" + #define SCNu64 EA_SCN_64_LENGTH_SPECIFIER "u" + #define SCNx64 EA_SCN_64_LENGTH_SPECIFIER "x" + + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1900) + #define SCNdPTR PRIdPTR + #define SCNiPTR PRIiPTR + #define SCNoPTR PRIoPTR + #define SCNuPTR PRIuPTR + #define SCNxPTR PRIxPTR + #elif (EA_PLATFORM_PTR_SIZE == 4) + #define SCNdPTR SCNd32 // Usage of pointer values will generate warnings with + #define SCNiPTR SCNi32 // some compilers because they are defined in terms of + #define SCNoPTR SCNo32 // integers. However, you can't simply use "p" because + #define SCNuPTR SCNu32 // 'p' is interpreted in a specific and often different + #define SCNxPTR SCNx32 // way by the library. + #elif (EA_PLATFORM_PTR_SIZE == 8) + #define SCNdPTR SCNd64 + #define SCNiPTR SCNi64 + #define SCNoPTR SCNo64 + #define SCNuPTR SCNu64 + #define SCNxPTR SCNx64 + #endif +#endif + + +// ------------------------------------------------------------------------ +// bool8_t +// The definition of a bool8_t is controversial with some, as it doesn't +// act just like built-in bool. For example, you can assign -100 to it. +// +#ifndef BOOL8_T_DEFINED // If the user hasn't already defined this... + #define BOOL8_T_DEFINED + #if defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_INTEL) && defined(EA_PLATFORM_WINDOWS)) + #if defined(__cplusplus) + typedef bool bool8_t; + #else + typedef int8_t bool8_t; + #endif + #else // EA_COMPILER_GNUC generally uses 4 bytes per bool. + typedef int8_t bool8_t; + #endif +#endif + + +// ------------------------------------------------------------------------ +// intptr_t / uintptr_t +// Integer type guaranteed to be big enough to hold +// a native pointer ( intptr_t is defined in STDDEF.H ) +// +#if !defined(_INTPTR_T_DEFINED) && !defined(_intptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES) + #if (EA_PLATFORM_PTR_SIZE == 4) + typedef int32_t intptr_t; + #elif (EA_PLATFORM_PTR_SIZE == 8) + typedef int64_t intptr_t; + #endif + + #define _intptr_t_defined + #define _INTPTR_T_DEFINED +#endif + +#if !defined(_UINTPTR_T_DEFINED) && !defined(_uintptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES) + #if (EA_PLATFORM_PTR_SIZE == 4) + typedef uint32_t uintptr_t; + #elif (EA_PLATFORM_PTR_SIZE == 8) + typedef uint64_t uintptr_t; + #endif + + #define _uintptr_t_defined + #define _UINTPTR_T_DEFINED +#endif + +#if !defined(EA_COMPILER_HAS_INTTYPES) + #ifndef INTMAX_T_DEFINED + #define INTMAX_T_DEFINED + + // At this time, all supported compilers have int64_t as the max + // integer type. Some compilers support a 128 bit integer type, + // but in some cases it is not a true int128_t but rather a + // crippled data type. Also, it turns out that Unix 64 bit ABIs + // require that intmax_t be int64_t and nothing larger. So we + // play it safe here and set intmax_t to int64_t, even though + // an int128_t type may exist. + + typedef int64_t intmax_t; + typedef uint64_t uintmax_t; + #endif +#endif + + +// ------------------------------------------------------------------------ +// ssize_t +// signed equivalent to size_t. +// This is defined by GCC (except the QNX implementation of GCC) but not by other compilers. +// +#if !defined(__GNUC__) + // As of this writing, all non-GCC compilers significant to us implement + // uintptr_t the same as size_t. 
However, this isn't guaranteed to be
+	// so for all compilers, as size_t may be based on int, long, or long long.
+	#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
+		#define _SSIZE_T_
+		#define _SSIZE_T_DEFINED
+
+		#if defined(_MSC_VER) && (EA_PLATFORM_PTR_SIZE == 8)
+			typedef __int64 ssize_t;
+		#else
+			typedef long ssize_t;
+		#endif
+	#endif
+#else
+	#include <sys/types.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// Character types
+//
+#if defined(EA_COMPILER_MSVC)
+	#if defined(EA_WCHAR_T_NON_NATIVE)
+		// In this case, wchar_t is not defined unless we include
+		// wchar.h or if the compiler makes it built-in.
+		#ifdef EA_COMPILER_MSVC
+			#pragma warning(push, 3)
+		#endif
+		#include <wchar.h>
+		#ifdef EA_COMPILER_MSVC
+			#pragma warning(pop)
+		#endif
+	#endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// char8_t  -- Guaranteed to be equal to the compiler's char data type.
+//             Some compilers implement char8_t as unsigned, though char
+//             is usually set to be signed.
+//
+// char16_t -- This is set to be an unsigned 16 bit value. If the compiler
+//             has wchar_t as an unsigned 16 bit value, then char16_t is
+//             set to be the same thing as wchar_t in order to allow the
+//             user to use char16_t with standard wchar_t functions.
+//
+// char32_t -- This is set to be an unsigned 32 bit value. If the compiler
+//             has wchar_t as an unsigned 32 bit value, then char32_t is
+//             set to be the same thing as wchar_t in order to allow the
+//             user to use char32_t with standard wchar_t functions.
+//
+// EA_CHAR8_UNIQUE
+// EA_CHAR16_NATIVE
+// EA_CHAR32_NATIVE
+// EA_WCHAR_UNIQUE
+//
+// VS2010 unilaterally defines char16_t and char32_t in its yvals.h header
+// unless _HAS_CHAR16_T_LANGUAGE_SUPPORT or _CHAR16T are defined.
+// However, VS2010 does not support the C++0x u"" and U"" string literals,
+// which makes its definition of char16_t and char32_t somewhat useless.
+// Until VC++ supports string literals, the build system should define
+// _CHAR16T and let EABase define char16_t and EA_CHAR16.
+//
+// GCC defines char16_t and char32_t in the C compiler in -std=gnu99 mode,
+// as __CHAR16_TYPE__ and __CHAR32_TYPE__, and for the C++ compiler
+// in -std=c++0x and -std=gnu++0x modes, as char16_t and char32_t too.
+//
+// The EA_WCHAR_UNIQUE symbol is defined to 1 if wchar_t is distinct from
+// char8_t, char16_t, and char32_t, and defined to 0 if not. In some cases,
+// if the compiler does not support char16_t/char32_t, one of these two types
+// is typically a typedef or define of wchar_t. For compilers that support
+// the C++11 unicode character types, overloads must often be provided to
+// support existing code that passes a wide char string to a function that
+// takes a unicode string.
+//
+// The EA_CHAR8_UNIQUE symbol is defined to 1 if char8_t is a distinct type
+// from char in the type system, and defined to 0 otherwise.
+
+#if !defined(EA_CHAR16_NATIVE)
+	// To do: Change this to be based on EA_COMPILER_NO_NEW_CHARACTER_TYPES.
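+	// Editorial sketch (not part of EABase): once EA_CHAR16_NATIVE is settled
+	// by the detection below, user code can branch on it to pick a literal
+	// strategy, e.g.:
+	//
+	//     #if EA_CHAR16_NATIVE
+	//         const char16_t* s = u"hi";              // real u"" literal
+	//     #else
+	//         const char16_t s[] = { 0x68, 0x69, 0 }; // "hi" built by hand
+	//     #endif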
+	#if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT // VS2010+
+		#define EA_CHAR16_NATIVE 1
+	#elif defined(EA_COMPILER_CLANG) && defined(EA_COMPILER_CPP11_ENABLED)
+		#if __has_feature(cxx_unicode_literals)
+			#define EA_CHAR16_NATIVE 1
+		#elif (EA_COMPILER_VERSION >= 300) && !(defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX))
+			#define EA_CHAR16_NATIVE 1
+		#elif defined(EA_PLATFORM_APPLE)
+			#define EA_CHAR16_NATIVE 1
+		#else
+			#define EA_CHAR16_NATIVE 0
+		#endif
+	#elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) && defined(__CHAR16_TYPE__) && defined(EA_COMPILER_CPP11_ENABLED) // EDG 4.4+.
+		#define EA_CHAR16_NATIVE 1
+	#elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_EDG) && (defined(EA_COMPILER_CPP11_ENABLED) || defined(__STDC_VERSION__)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99
+		#define EA_CHAR16_NATIVE 1
+	#else
+		#define EA_CHAR16_NATIVE 0
+	#endif
+#endif
+
+#if !defined(EA_CHAR32_NATIVE) // Microsoft currently ties char32_t language support to char16_t language support. So we use CHAR16_T here.
+	// To do: Change this to be based on EA_COMPILER_NO_NEW_CHARACTER_TYPES.
+	#if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT // VS2010+
+		#define EA_CHAR32_NATIVE 1
+	#elif defined(EA_COMPILER_CLANG) && defined(EA_COMPILER_CPP11_ENABLED)
+		#if __has_feature(cxx_unicode_literals)
+			#define EA_CHAR32_NATIVE 1
+		#elif (EA_COMPILER_VERSION >= 300) && !(defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX))
+			#define EA_CHAR32_NATIVE 1
+		#elif defined(EA_PLATFORM_APPLE)
+			#define EA_CHAR32_NATIVE 1
+		#else
+			#define EA_CHAR32_NATIVE 0
+		#endif
+	#elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 404) && defined(__CHAR32_TYPE__) && defined(EA_COMPILER_CPP11_ENABLED) // EDG 4.4+.
+		#define EA_CHAR32_NATIVE 1
+	#elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_EDG) && (defined(EA_COMPILER_CPP11_ENABLED) || defined(__STDC_VERSION__)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99
+		#define EA_CHAR32_NATIVE 1
+	#else
+		#define EA_CHAR32_NATIVE 0
+	#endif
+#endif
+
+
+#if EA_CHAR16_NATIVE || EA_CHAR32_NATIVE
+	#define EA_WCHAR_UNIQUE 1
+#else
+	#define EA_WCHAR_UNIQUE 0
+#endif
+
+
+// EA_CHAR8_UNIQUE
+//
+// Check for char8_t support in the C++ type system. Moving forward from C++20,
+// the char8_t type allows users to overload functions on character encoding.
+//
+// EA_CHAR8_UNIQUE is 1 when the type is unique in the type system and
+// can therefore be used as a valid overload. EA_CHAR8_UNIQUE is 0 otherwise.
+//
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0482r6.html
+//
+#ifdef __cpp_char8_t
+	#define CHAR8_T_DEFINED
+	#define EA_CHAR8_UNIQUE 1
+#else
+	#define EA_CHAR8_UNIQUE 0
+#endif
+
+
+#ifndef CHAR8_T_DEFINED // If the user hasn't already defined these...
+	#define CHAR8_T_DEFINED
+	#if defined(EA_PLATFORM_APPLE)
+		#define char8_t char // The Apple debugger is too stupid to realize char8_t is typedef'd to char, so we #define it.
+	#else
+		typedef char char8_t;
+	#endif
+
+	#if EA_CHAR16_NATIVE
+		// In C++, char16_t and char32_t are already defined by the compiler.
+		// In MS C, char16_t and char32_t are already defined by the compiler/standard library.
+ // In GCC C, __CHAR16_TYPE__ and __CHAR32_TYPE__ are defined instead, and we must define char16_t and char32_t from these. + #if defined(__GNUC__) && !defined(__GXX_EXPERIMENTAL_CXX0X__) && defined(__CHAR16_TYPE__) // If using GCC and compiling in C... + typedef __CHAR16_TYPE__ char16_t; + typedef __CHAR32_TYPE__ char32_t; + #endif + #elif (EA_WCHAR_SIZE == 2) + #if (defined(_MSC_VER) && (_MSC_VER >= 1600)) // if VS2010+ or using platforms that use Dinkumware under a compiler that doesn't natively support C++11 char16_t. + #if !defined(_CHAR16T) + #define _CHAR16T + #endif + #if !defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) || !_HAS_CHAR16_T_LANGUAGE_SUPPORT + typedef wchar_t char16_t; + typedef uint32_t char32_t; + #endif + #else + typedef wchar_t char16_t; + typedef uint32_t char32_t; + #endif + #else + typedef uint16_t char16_t; + #if defined(__cplusplus) + typedef wchar_t char32_t; + #else + typedef uint32_t char32_t; + #endif + #endif +#endif + + +// CHAR8_MIN, CHAR8_MAX, etc. +// +#define EA_LIMITS_DIGITS_S(T) ((sizeof(T) * 8) - 1) +#define EA_LIMITS_DIGITS_U(T) ((sizeof(T) * 8)) +#define EA_LIMITS_DIGITS(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_DIGITS_S(T) : EA_LIMITS_DIGITS_U(T))) +#define EA_LIMITS_IS_SIGNED(T) ((T)(-1) < 0) +#define EA_LIMITS_MIN_S(T) ((T)((T)1 << EA_LIMITS_DIGITS_S(T))) +#define EA_LIMITS_MIN_U(T) ((T)0) +#define EA_LIMITS_MIN(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_MIN_S(T) : EA_LIMITS_MIN_U(T))) +#define EA_LIMITS_MAX_S(T) ((T)(((((T)1 << (EA_LIMITS_DIGITS(T) - 1)) - 1) << 1) + 1)) +#define EA_LIMITS_MAX_U(T) ((T)~(T)0) +#define EA_LIMITS_MAX(T) ((EA_LIMITS_IS_SIGNED(T) ? EA_LIMITS_MAX_S(T) : EA_LIMITS_MAX_U(T))) + +#if !defined(CHAR8_MIN) + #define CHAR8_MIN EA_LIMITS_MIN(char8_t) +#endif + +#if !defined(CHAR8_MAX) + #define CHAR8_MAX EA_LIMITS_MAX(char8_t) +#endif + +#if !defined(CHAR16_MIN) + #define CHAR16_MIN EA_LIMITS_MIN(char16_t) +#endif + +#if !defined(CHAR16_MAX) + #define CHAR16_MAX EA_LIMITS_MAX(char16_t) +#endif + +#if !defined(CHAR32_MIN) + #define CHAR32_MIN EA_LIMITS_MIN(char32_t) +#endif + +#if !defined(CHAR32_MAX) + #define CHAR32_MAX EA_LIMITS_MAX(char32_t) +#endif + + + +// EA_CHAR8 / EA_CHAR16 / EA_CHAR32 / EA_WCHAR +// +// Supports usage of portable string constants. +// +// Example usage: +// const char16_t* str = EA_CHAR16("Hello world"); +// const char32_t* str = EA_CHAR32("Hello world"); +// const char16_t c = EA_CHAR16('\x3001'); +// const char32_t c = EA_CHAR32('\x3001'); +// +#ifndef EA_CHAR8 + #if EA_CHAR8_UNIQUE + #define EA_CHAR8(s) u8 ## s + #else + #define EA_CHAR8(s) s + #endif +#endif + +#ifndef EA_WCHAR + #define EA_WCHAR_(s) L ## s + #define EA_WCHAR(s) EA_WCHAR_(s) +#endif + +#ifndef EA_CHAR16 + #if EA_CHAR16_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char16_t string literals. + #define EA_CHAR16_(s) u ## s + #define EA_CHAR16(s) EA_CHAR16_(s) + #elif (EA_WCHAR_SIZE == 2) + #if defined(_MSC_VER) && (_MSC_VER >= 1900) && defined(__cplusplus) // VS2015 supports u"" string literals. + #define EA_CHAR16_(s) u ## s + #define EA_CHAR16(s) EA_CHAR16_(s) + #else + #define EA_CHAR16_(s) L ## s + #define EA_CHAR16(s) EA_CHAR16_(s) + #endif + #else + //#define EA_CHAR16(s) // Impossible to implement efficiently. + #endif +#endif + +#ifndef EA_CHAR32 + #if EA_CHAR32_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char32_t string literals. 
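+	// Editorial sketch (not part of EABase): the EA_LIMITS_* helpers earlier in
+	// this header are plain sizeof/shift arithmetic, so they can be verified at
+	// compile time. Assuming a 16-bit unsigned char16_t:
+	//
+	//     static_assert(!EA_LIMITS_IS_SIGNED(char16_t), "char16_t is unsigned");
+	//     static_assert(CHAR16_MIN == 0, "min of an unsigned type is 0");
+	//     static_assert(CHAR16_MAX == 0xFFFF, "(2^16) - 1");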
+		#define EA_CHAR32_(s) U ## s
+		#define EA_CHAR32(s) EA_CHAR32_(s)
+	#elif (EA_WCHAR_SIZE == 2)
+		#if defined(_MSC_VER) && (_MSC_VER >= 1900) && defined(__cplusplus) // VS2015 supports u"" string literals.
+			#define EA_CHAR32_(s) U ## s
+			#define EA_CHAR32(s) EA_CHAR32_(s)
+		#else
+			//#define EA_CHAR32(s) // Impossible to implement.
+		#endif
+	#elif (EA_WCHAR_SIZE == 4)
+		#define EA_CHAR32_(s) L ## s
+		#define EA_CHAR32(s) EA_CHAR32_(s)
+	#else
+		#error Unexpected size of wchar_t
+	#endif
+#endif
+
+// EAText8 / EAText16
+//
+// Provided for backwards compatibility with older code.
+//
+#if defined(EABASE_ENABLE_EATEXT_MACROS)
+	#define EAText8(x) x
+	#define EAChar8(x) x
+
+	#define EAText16(x) EA_CHAR16(x)
+	#define EAChar16(x) EA_CHAR16(x)
+#endif
+
+
+
+
+// ------------------------------------------------------------------------
+// EAArrayCount
+//
+// Returns the count of items in a built-in C array. This is a common technique
+// which is often used to help properly calculate the number of items in an
+// array at runtime in order to prevent overruns, etc.
+//
+// Example usage:
+//     int array[75];
+//     size_t arrayCount = EAArrayCount(array); // arrayCount is 75.
+//
+#if defined(EA_COMPILER_NO_CONSTEXPR)
+	#ifndef EAArrayCount
+		#define EAArrayCount(x) (sizeof(x) / sizeof(x[0]))
+	#endif
+#else
+	// This C++11 version is a little smarter than the macro version above;
+	// it can tell the difference between arrays and pointers. Other simpler
+	// templated versions have failed in various subtle ways.
+
+	template <typename T, size_t N>
+	char (&EAArraySizeHelper(T (&x)[N]))[N];
+
+	template <typename T, size_t N>
+	char (&EAArraySizeHelper(T (&&x)[N]))[N];
+
+	#define EAArrayCount(x) (sizeof(EAArraySizeHelper(x)))
+#endif
+
+
+// ------------------------------------------------------------------------
+// static_assert
+//
+// C++11 static_assert (a.k.a. compile-time assert).
+//
+// Specification:
+//     void static_assert(bool const_expression, const char* description);
+//
+// Example usage:
+//     static_assert(sizeof(int) == 4, "int must be 32 bits");
+//
+#if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)
+	// static_assert is defined by the compiler for both C and C++.
+#elif !defined(__cplusplus) && defined(EA_PLATFORM_ANDROID) && ((defined(__STDC_VERSION__) && __STDC_VERSION__ < 201100L) || !defined(__STDC_VERSION__))
+	// AndroidNDK does not support static_assert despite claiming it's a C11 compiler
+	#define NEED_CUSTOM_STATIC_ASSERT
+#elif defined(__clang__) && defined(__cplusplus)
+	// We need to separate these checks on a new line, as the pre-processor on other compilers will fail on the __has_feature macros
+	#if !(__has_feature(cxx_static_assert) || __has_extension(cxx_static_assert))
+		#define NEED_CUSTOM_STATIC_ASSERT
+	#endif
+#elif defined(__GNUC__) && (defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && (__cplusplus >= 201103L)))
+	// static_assert is defined by the compiler.
+#elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 401) && defined(EA_COMPILER_CPP11_ENABLED)
+	// static_assert is defined by the compiler.
+#elif !defined(__cplusplus) && defined(__GLIBC__) && defined(__USE_ISOC11)
+	// static_assert is defined by the compiler.
+#elif !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201100L
+	// static_assert is defined by the compiler.
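+	// Editorial sketch (not part of EABase): unlike the sizeof-based macro, the
+	// templated EAArrayCount above rejects pointers at compile time:
+	//
+	//     int a[75];
+	//     int* p = a;
+	//     size_t n = EAArrayCount(a); // 75
+	//     size_t m = EAArrayCount(p); // compile error: no array reference match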
+#else
+	#define NEED_CUSTOM_STATIC_ASSERT
+#endif
+
+#ifdef NEED_CUSTOM_STATIC_ASSERT
+	#ifdef __GNUC__
+		// On GCC the 'unused' attribute can be used to indicate a typedef is not actually used
+		// (such as in the static_assert implementation below). New versions of GCC generate
+		// warnings for unused typedefs in function/method scopes.
+		#define EA_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
+	#else
+		#define EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+	#endif
+	#define EA_STATIC_ASSERT_TOKEN_PASTE(a,b) a ## b
+	#define EA_STATIC_ASSERT_CONCATENATE_HELPER(a,b) EA_STATIC_ASSERT_TOKEN_PASTE(a,b)
+
+	#if defined(__COUNTER__) // If this extension is available, which allows multiple statements per line...
+		#define static_assert(expression, description) typedef char EA_STATIC_ASSERT_CONCATENATE_HELPER(compileTimeAssert,__COUNTER__) [((expression) != 0) ? 1 : -1] EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+	#else
+		#define static_assert(expression, description) typedef char EA_STATIC_ASSERT_CONCATENATE_HELPER(compileTimeAssert,__LINE__) [((expression) != 0) ? 1 : -1] EA_STATIC_ASSERT_UNUSED_ATTRIBUTE
+	#endif
+
+	#undef NEED_CUSTOM_STATIC_ASSERT
+#endif
+
+// ------------------------------------------------------------------------
+// EA_IS_ENABLED
+//
+// EA_IS_ENABLED is intended to be used for detecting if compile time features are enabled or disabled.
+//
+// It has some advantages over standard #if or #ifdef tests:
+// 1) Fails to compile when passed numeric macro values. Valid options are strictly enabled or disabled.
+// 2) Fails to compile when passed undefined macro values, rather than disabling by default.
+// 3) Fails to compile when the passed macro is defined but empty.
+//
+// To use the macro, the calling code should create a define for the feature to enable or disable. This feature define
+// must be set to either EA_ENABLED or EA_DISABLED. (Do not try to set the feature define directly to some other
+// value.)
+//
+// Note: These macros are analogous to the Frostbite macro FB_USING used in combination with FB_OFF / FB_ON and are
+// designed to be compatible to support gradual migration.
+//
+// Example usage:
+//
+//     // The USER_PROVIDED_FEATURE_DEFINE should be defined as either
+//     // EA_ENABLED or EA_DISABLED.
+//     #define USER_PROVIDED_FEATURE_DEFINE EA_ENABLED
+//
+//     #if EA_IS_ENABLED(USER_PROVIDED_FEATURE_DEFINE)
+//         // USER_PROVIDED_FEATURE_DEFINE is enabled
+//     #else
+//         // USER_PROVIDED_FEATURE_DEFINE is disabled
+//     #endif
+//
+#define EA_ENABLED 111-
+#define EA_DISABLED 333-
+// NOTE: Numeric values for x will produce a parse error, while empty values produce a divide by zero; the test is a bool for proper negation behavior.
+// How it works: x expands to "111-" or "333-", so "(x 0)" evaluates to 111-0 == 111 or 333-0 == 333. The divisor then
+// becomes 111 or 333 respectively, and 333 * 111 divided by it equals 333 only in the enabled case.
+#define EA_IS_ENABLED(x) (333 == 333 * 111 / ((x 0) * (((x 0) == 333 ? 1 : 0) + ((x 0) == 111 ? 1 : 0))))
+
+
+
+// Define int128_t / uint128_t types.
+// NOTE(rparolin): include file at the end because we want all the signed integral types defined.
+#ifdef __cplusplus
+	#include <EABase/int128.h>
+#endif
+
+#endif // Header include guard
+
+
+
+
diff --git a/external/EASTL/include/EABase/eadeprecated.h b/external/EASTL/include/EABase/eadeprecated.h
new file mode 100644
index 00000000..72941a88
--- /dev/null
+++ b/external/EASTL/include/EABase/eadeprecated.h
@@ -0,0 +1,252 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+#ifndef EABASE_EADEPRECATED_H
+#define EABASE_EADEPRECATED_H
+
+#include <EABase/eabase.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Documentation on deprecated attribute: https://en.cppreference.com/w/cpp/language/attributes/deprecated
+// Documentation on SimVer version numbers: http://simver.org/
+//
+// These macros provide structured formatting for C++ deprecation annotation messages. This ensures
+// that the required information is presented in a standard format for developers and tools.
+//
+// Example usage:
+//
+// Current package version : current_ver
+// Future version for code removal : major_ver, minor_ver, change_ver
+// Deprecation comment : ""
+//
+// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated function")
+// void TestFunc() {}
+//
+// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated typedef")
+// typedef int TestTypedef;
+//
+// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated variable")
+// int TestVariable;
+//
+// EA_DEPRECATED_STRUCT(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated struct")
+// TestStruct {};
+//
+// EA_DEPRECATED_CLASS(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated class")
+// TestClass {};
+//
+// union TestUnion
+// {
+//     EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated data member") int n;
+// };
+//
+// EA_DEPRECATED_ENUM(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated enumeration")
+// TestEnumeration { TestEnumeration_Value1, TestEnumeration_Value2 };
+//
+// enum TestEnumerator
+// {
+//     EA_DEPRECATED_ENUMVALUE(TestEnumerator_Value1, current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated enum value") = 5,
+//     TestEnumerator_Value2 = 4
+// };
+//
+// EA_DISABLE_DEPRECATED(current_ver, major_ver, minor_ver, change_ver, tag, "Suppress the deprecated warning until the given Release")
+// void TestFunc() {}
+// EA_RESTORE_DEPRECATED()
+//
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Create an integer version number which can be compared with numerical operators
+//
+#define EA_CREATE_VERSION(MAJOR, MINOR, PATCH) \
+	(((MAJOR) * 1000000) + (((MINOR) + 1) * 10000) + (((PATCH) + 1) * 100))
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// INTERNAL MACROS - DO NOT USE DIRECTLY
+//
+// When EA_DEPRECATED_API_EXPIRED_IS_ERROR is set, this macro produces a static_assert on code that is past its expiry date.
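+// Editorial note (not part of EABase): EA_CREATE_VERSION makes versions
+// ordinally comparable; e.g. a module at version 2.3.1 becomes
+//     (2 * 1000000) + ((3 + 1) * 10000) + ((1 + 1) * 100) = 2040200
+// and an API tagged for removal at 3.0.0 (3010100) only trips the
+// static_assert once the module version reaches or passes that value:
+//
+//     static_assert(2040200 < EA_CREATE_VERSION(3, 0, 0), "not yet expired");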
+//
+#if defined(EA_DEPRECATED_API_EXPIRED_IS_ERROR) && EA_DEPRECATED_API_EXPIRED_IS_ERROR
+	#define EA_INTERNAL_DEPRECATED_BEFORETYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation) \
+		static_assert(_moduleVersion < EA_CREATE_VERSION(_major_version,_minor_version,_patch_version), "This API has been deprecated and needs to be removed");
+#else
+	#define EA_INTERNAL_DEPRECATED_BEFORETYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation)
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// INTERNAL MACROS - DO NOT USE DIRECTLY
+//
+// When EA_IGNORE_DEPRECATION is set, deprecation annotations will not be produced.
+//
+#if defined(EA_IGNORE_DEPRECATION) && EA_IGNORE_DEPRECATION
+	#define EA_INTERNAL_DEPRECATED_AFTERTYPE(_major_version, _minor_version, _patch_version, _annotation, _msg)
+#else
+	#define EA_INTERNAL_DEPRECATED_AFTERTYPE(_major_version, _minor_version, _patch_version, _annotation, _msg) \
+		EA_DEPRECATED_MESSAGE(_msg. This API will be removed in _major_version._minor_version._patch_version _annotation)
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// INTERNAL MACROS - DO NOT USE DIRECTLY
+//
+// Simple case
+//
+#define EA_INTERNAL_DEPRECATED_SIMPLE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \
+	EA_INTERNAL_DEPRECATED_BEFORETYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation) \
+	EA_INTERNAL_DEPRECATED_AFTERTYPE(_major_version, _minor_version, _patch_version, _annotation, _msg)
+
+
+// ------------------------------------------------------------------------
+// INTERNAL MACROS - DO NOT USE DIRECTLY
+//
+// Macro which inserts the keyword to correctly format the deprecation annotation
+#define EA_INTERNAL_DEPRECATED_TYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg, _keyword) \
+	EA_INTERNAL_DEPRECATED_BEFORETYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation) \
+	_keyword \
+	EA_INTERNAL_DEPRECATED_AFTERTYPE(_major_version, _minor_version, _patch_version, _annotation, _msg)
+
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// PUBLIC MACROS
+// See file header comment for example usage.
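+// Editorial note (not part of EABase): assuming EA_DEPRECATED_MESSAGE maps to
+// the standard [[deprecated("...")]] attribute, a use such as
+//
+//     EA_DEPRECATED_CLASS(2040200, 3, 0, 0, mytag, "Use NewClass")
+//     TestClass {};
+//
+// expands roughly to:
+//
+//     class [[deprecated("Use NewClass. This API will be removed in 3.0.0 mytag")]] TestClass {};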
+// + +///////////////////////////////////////////////////////////////////////////////// +// +// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated function") +// void TestFunc() {} +// +// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated typedef") +// typedef int TestTypedef; +// +// EA_DEPRECATED_API(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated variable") +// int TestVariable; +// +#define EA_DEPRECATED_API(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + EA_INTERNAL_DEPRECATED_SIMPLE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) + + +///////////////////////////////////////////////////////////////////////////////// +// +// EA_DEPRECATED_STRUCT(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated struct") +// TestStruct {}; +// +#define EA_DEPRECATED_STRUCT(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + EA_INTERNAL_DEPRECATED_TYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg, struct) + + +///////////////////////////////////////////////////////////////////////////////// +// +// EA_DEPRECATED_CLASS(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated class") +// TestClass {}; +// +#define EA_DEPRECATED_CLASS(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + EA_INTERNAL_DEPRECATED_TYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg, class) + + +///////////////////////////////////////////////////////////////////////////////// +// +// EA_DEPRECATED_ENUM(current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated enumeration") +// TestEnumeration { TestEnumeration_Value1, TestEnumeration_Value2 }; +// +#define EA_DEPRECATED_ENUM(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + EA_INTERNAL_DEPRECATED_TYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg, enum) + + +///////////////////////////////////////////////////////////////////////////////// +// +// enum TestEnumerator +// { +// EA_DEPRECATED_ENUMVALUE(TestEnumerator_Value1, current_ver, major_ver, minor_ver, change_ver, tag, "Do not use deprecated enum value") = 5, +// TestEnumerator_Value2 = 4 +// }; +// +#define EA_DEPRECATED_ENUMVALUE(_value, _moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + _value EA_INTERNAL_DEPRECATED_AFTERTYPE(_major_version, _minor_version, _patch_version, _annotation, _msg) + + +///////////////////////////////////////////////////////////////////////////////// +// +// Suppress deprecated warnings around a block of code, see file comment for full usage. 
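+// Editorial sketch (not part of EABase): a call site that must keep using a
+// deprecated API until the stated release can suppress the warning locally:
+//
+//     EA_DISABLE_DEPRECATED(2040200, 3, 0, 0, mytag, "Needed until 3.0.0")
+//     LegacyFunc(); // hypothetical deprecated function
+//     EA_RESTORE_DEPRECATED()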
+// EA_DISABLE_DEPRECATED(current_ver, major_ver, minor_ver, change_ver, tag, "Suppress the deprecated warning until the given Release") +// +#define EA_DISABLE_DEPRECATED(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation, _msg) \ + EA_INTERNAL_DEPRECATED_BEFORETYPE(_moduleVersion, _major_version, _minor_version, _patch_version, _annotation) \ + EA_DISABLE_VC_WARNING(4996); \ + EA_DISABLE_CLANG_WARNING(-Wdeprecated-declarations); + +///////////////////////////////////////////////////////////////////////////////// +// +// Restore the compiler warnings +// EA_RESTORE_DEPRECATED() +// +#define EA_RESTORE_DEPRECATED() \ + EA_RESTORE_CLANG_WARNING(); \ + EA_RESTORE_VC_WARNING(); + + +///////////////////////////////////////////////////////////////////////////////////// +// Some of our code doesn't have fixed cadence on when major/minor/patch versions are updated, the +// following macros are for use when the deprecation window needs to be independent from the version +// numbers. We'll be providing these as needed in six months increments. + + +// EA_DEPRECATIONS_FOR_2024_APRIL +// This macro is provided as a means to disable warnings temporarily (in particular if a user is compiling with warnings as errors). +// All deprecations raised by this macro (when it is EA_ENABLED) are scheduled for removal approximately April 2024. +#ifndef EA_DEPRECATIONS_FOR_2024_APRIL + #define EA_DEPRECATIONS_FOR_2024_APRIL EA_ENABLED +#endif + +#if EA_IS_ENABLED(EA_DEPRECATIONS_FOR_2024_APRIL) + #define EA_REMOVE_AT_2024_APRIL EA_DEPRECATED + #define EA_REMOVE_AT_2024_APRIL_MSG EA_DEPRECATED_MESSAGE +#else + #define EA_REMOVE_AT_2024_APRIL + #define EA_REMOVE_AT_2024_APRIL_MSG(msg) +#endif + +// EA_DEPRECATIONS_FOR_2024_SEPT +// This macro is provided as a means to disable warnings temporarily (in particular if a user is compiling with warnings as errors). +// All deprecations raised by this macro (when it is EA_ENABLED) are scheduled for removal approximately September 2024. +#ifndef EA_DEPRECATIONS_FOR_2024_SEPT + #define EA_DEPRECATIONS_FOR_2024_SEPT EA_ENABLED +#endif + +#if EA_IS_ENABLED(EA_DEPRECATIONS_FOR_2024_SEPT) + #define EA_REMOVE_AT_2024_SEPT EA_DEPRECATED + #define EA_REMOVE_AT_2024_SEPT_MSG EA_DEPRECATED_MESSAGE +#else + #define EA_REMOVE_AT_2024_SEPT + #define EA_REMOVE_AT_2024_SEPT_MSG(msg) +#endif + +// EA_DEPRECATIONS_FOR_2025_APRIL +// This macro is provided as a means to disable warnings temporarily (in particular if a user is compiling with warnings as errors). +// All deprecations raised by this macro (when it is EA_ENABLED) are scheduled for removal approximately April 2025. +#ifndef EA_DEPRECATIONS_FOR_2025_APRIL + #define EA_DEPRECATIONS_FOR_2025_APRIL EA_ENABLED +#endif + +#if EA_IS_ENABLED(EA_DEPRECATIONS_FOR_2025_APRIL) + #define EA_REMOVE_AT_2025_APRIL EA_DEPRECATED + #define EA_REMOVE_AT_2025_APRIL_MSG EA_DEPRECATED_MESSAGE +#else + #define EA_REMOVE_AT_2025_APRIL + #define EA_REMOVE_AT_2025_APRIL_MSG(msg) +#endif + + +#endif /* EABASE_EADEPRECATED_H */ diff --git a/external/EASTL/include/EABase/eahave.h b/external/EASTL/include/EABase/eahave.h new file mode 100644 index 00000000..b0987be7 --- /dev/null +++ b/external/EASTL/include/EABase/eahave.h @@ -0,0 +1,877 @@ +/*----------------------------------------------------------------------------- + * eahave.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. 
+ *---------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------- + This file's functionality is preliminary and won't be considered stable until + a future EABase version. + *---------------------------------------------------------------------------*/ + + +/*----------------------------------------------------------------------------- + This header identifies if the given facilities are available in the + standard build environment the current compiler/linker/standard library/ + operating system combination. This file may in some cases #include standard + headers in order to make availability determinations, such as to check + compiler or SDK version numbers. However, it cannot be perfect. + This header does not identify compiler features, as those are defined in + eacompiler.h and eacompilertraits.h. Rather this header is about library support. + This header does not identify platform or library conventions either, such + as whether the file paths use \ or / for directory separators. + + We provide three types of HAVE features here: + + - EA_HAVE_XXX_FEATURE - Have compiler feature. + Identifies if the compiler has or lacks some feature in the + current build. Sometimes you need to check to see if the + compiler is running in some mode in able to write portable code + against it. For example, some compilers (e.g. VC++) have a + mode in which all language extensions are disabled. If you want + to write code that works with that but still uses the extensions + when available then you can check #if defined(EA_HAVE_EXTENSIONS_FEATURE). + Features can be forcibly cancelled via EA_NO_HAVE_XXX_FEATURE. + EA_NO_HAVE is useful for a build system or user to override the + defaults because it happens to know better. + + - EA_HAVE_XXX_H - Have header file information. + Identifies if a given header file is available to the current + compile configuration. For example, some compilers provide a + malloc.h header, while others don't. For the former we define + EA_HAVE_MALLOC_H, while for the latter it remains undefined. + If a header is missing then it may still be that the functions + the header usually declares are declared in some other header. + EA_HAVE_XXX does not include the possibility that our own code + provides versions of these headers, and in fact a purpose of + EA_HAVE_XXX is to decide if we should be using our own because + the system doesn't provide one. + Header availability can be forcibly cancelled via EA_NO_HAVE_XXX_H. + EA_NO_HAVE is useful for a build system or user to override the + defaults because it happens to know better. + + - EA_HAVE_XXX_DECL - Have function declaration information. + Identifies if a given function declaration is provided by + the current compile configuration. For example, some compiler + standard libraries declare a wcslen function, while others + don't. For the former we define EA_HAVE_WCSLEN_DECL, while for + the latter it remains undefined. If a declaration of a function + is missing then we assume the implementation is missing as well. + EA_HAVE_XXX_DECL does not include the possibility that our + own code provides versions of these declarations, and in fact a + purpose of EA_HAVE_XXX_DECL is to decide if we should be using + our own because the system doesn't provide one. + Declaration availability can be forcibly cancelled via EA_NO_HAVE_XXX_DECL. 
+ EA_NO_HAVE is useful for a build system or user to override the + defaults because it happens to know better. + + - EA_HAVE_XXX_IMPL - Have function implementation information. + Identifies if a given function implementation is provided by + the current compile and link configuration. For example, it's + commonly the case that console platforms declare a getenv function + but don't provide a linkable implementation. + In this case the user needs to provide such a function manually + as part of the link. If the implementation is available then + we define EA_HAVE_GETENV_IMPL, otherwise it remains undefined. + Beware that sometimes a function may not seem to be present in + the Standard Library but in reality you need to link some auxiliary + provided library for it. An example of this is the Unix real-time + functions such as clock_gettime. + EA_HAVE_XXX_IMPL does not include the possibility that our + own code provides versions of these implementations, and in fact a + purpose of EA_HAVE_XXX_IMPL is to decide if we should be using + our own because the system doesn't provide one. + Implementation availability can be forcibly cancelled via EA_NO_HAVE_XXX_IMPL. + EA_NO_HAVE is useful for a build system or user to override the + defaults because it happens to know better. + + It's not practical to define EA_HAVE macros for every possible header, + declaration, and implementation, and so the user must simply know that + some headers, declarations, and implementations tend to require EA_HAVE + checking. Nearly every C Standard Library we've seen has a + header, a strlen declaration, and a linkable strlen implementation, + so there's no need to provide EA_HAVE support for this. On the other hand + it's commonly the case that the C Standard Library doesn't have a malloc.h + header or an inet_ntop declaration. + +---------------------------------------------------------------------------*/ + + +#ifndef INCLUDED_eahave_H +#define INCLUDED_eahave_H + + +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +/* EA_HAVE_XXX_FEATURE */ + +#if !defined(EA_HAVE_EXTENSIONS_FEATURE) && !defined(EA_NO_HAVE_EXTENSIONS_FEATURE) + #define EA_HAVE_EXTENSIONS_FEATURE 1 +#endif + + +/* EA_HAVE_XXX_LIBRARY */ + +// Dinkumware +#if !defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !defined(EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY) + #if defined(__cplusplus) + EA_DISABLE_ALL_VC_WARNINGS() + #include // Need to trigger the compilation of yvals.h without directly using because it might not exist. + EA_RESTORE_ALL_VC_WARNINGS() + #endif + + #if defined(__cplusplus) && defined(_CPPLIB_VER) /* If using the Dinkumware Standard library... */ + #define EA_HAVE_DINKUMWARE_CPP_LIBRARY 1 + #else + #define EA_NO_HAVE_DINKUMWARE_CPP_LIBRARY 1 + #endif +#endif + +// GCC libstdc++ +#if !defined(EA_HAVE_LIBSTDCPP_LIBRARY) && !defined(EA_NO_HAVE_LIBSTDCPP_LIBRARY) + #if defined(__GLIBCXX__) /* If using libstdc++ ... */ + #define EA_HAVE_LIBSTDCPP_LIBRARY 1 + #else + #define EA_NO_HAVE_LIBSTDCPP_LIBRARY 1 + #endif +#endif + +// Clang libc++ +#if !defined(EA_HAVE_LIBCPP_LIBRARY) && !defined(EA_NO_HAVE_LIBCPP_LIBRARY) + #if EA_HAS_INCLUDE_AVAILABLE + #if EA_HAS_INCLUDE(<__config>) + #define EA_HAVE_LIBCPP_LIBRARY 1 // We could also #include and check if defined(_LIBCPP_VERSION). 
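+		// Editorial note (not part of EABase): this is the __has_include-style
+		// probe; portable user code can apply the same technique directly, e.g.:
+		//
+		//     #if defined(__has_include) && __has_include(<version>)
+		//         #include <version> // C++20 feature-test header
+		//     #endif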
+ #endif + #endif + + #if !defined(EA_HAVE_LIBCPP_LIBRARY) + #define EA_NO_HAVE_LIBCPP_LIBRARY 1 + #endif +#endif + + +/* EA_HAVE_XXX_H */ + +// #include +#if !defined(EA_HAVE_SYS_TYPES_H) && !defined(EA_NO_HAVE_SYS_TYPES_H) + #define EA_HAVE_SYS_TYPES_H 1 +#endif + +// #include (and not sys/io.h or asm/io.h) +#if !defined(EA_HAVE_IO_H) && !defined(EA_NO_HAVE_IO_H) + // Unix doesn't have Microsoft's but has the same functionality in and . + #if defined(EA_PLATFORM_MICROSOFT) + #define EA_HAVE_IO_H 1 + #else + #define EA_NO_HAVE_IO_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_INTTYPES_H) && !defined(EA_NO_HAVE_INTTYPES_H) + #if !defined(EA_PLATFORM_MICROSOFT) + #define EA_HAVE_INTTYPES_H 1 + #else + #define EA_NO_HAVE_INTTYPES_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_UNISTD_H) && !defined(EA_NO_HAVE_UNISTD_H) + #if defined(EA_PLATFORM_UNIX) + #define EA_HAVE_UNISTD_H 1 + #else + #define EA_NO_HAVE_UNISTD_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_SYS_TIME_H) && !defined(EA_NO_HAVE_SYS_TIME_H) + #if !defined(EA_PLATFORM_MICROSOFT) && !defined(_CPPLIB_VER) /* _CPPLIB_VER indicates Dinkumware. */ + #define EA_HAVE_SYS_TIME_H 1 /* defines struct timeval */ + #else + #define EA_NO_HAVE_SYS_TIME_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_SYS_PTRACE_H) && !defined(EA_NO_HAVE_SYS_PTRACE_H) + #if defined(EA_PLATFORM_UNIX) && !defined(__CYGWIN__) && (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_SERVER)) + #define EA_HAVE_SYS_PTRACE_H 1 /* declares the ptrace function */ + #else + #define EA_NO_HAVE_SYS_PTRACE_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_SYS_STAT_H) && !defined(EA_NO_HAVE_SYS_STAT_H) + #if (defined(EA_PLATFORM_UNIX) && !(defined(EA_PLATFORM_SONY) && defined(EA_PLATFORM_CONSOLE))) || defined(__APPLE__) || defined(EA_PLATFORM_ANDROID) + #define EA_HAVE_SYS_STAT_H 1 /* declares the stat struct and function */ + #else + #define EA_NO_HAVE_SYS_STAT_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_LOCALE_H) && !defined(EA_NO_HAVE_LOCALE_H) + #define EA_HAVE_LOCALE_H 1 +#endif + +// #include +#if !defined(EA_HAVE_SIGNAL_H) && !defined(EA_NO_HAVE_SIGNAL_H) + #if !defined(EA_PLATFORM_BSD) && !defined(EA_PLATFORM_SONY) && !defined(CS_UNDEFINED_STRING) + #define EA_HAVE_SIGNAL_H 1 + #else + #define EA_NO_HAVE_SIGNAL_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_SYS_SIGNAL_H) && !defined(EA_NO_HAVE_SYS_SIGNAL_H) + #if defined(EA_PLATFORM_BSD) || defined(EA_PLATFORM_SONY) + #define EA_HAVE_SYS_SIGNAL_H 1 + #else + #define EA_NO_HAVE_SYS_SIGNAL_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_PTHREAD_H) && !defined(EA_NO_HAVE_PTHREAD_H) + #if defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_APPLE) || defined(EA_PLATFORM_POSIX) + #define EA_HAVE_PTHREAD_H 1 /* It can be had under Microsoft/Windows with the http://sourceware.org/pthreads-win32/ library */ + #else + #define EA_NO_HAVE_PTHREAD_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_WCHAR_H) && !defined(EA_NO_HAVE_WCHAR_H) + #if defined(EA_PLATFORM_DESKTOP) && defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_SONY) && defined(EA_PLATFORM_APPLE) + #define EA_HAVE_WCHAR_H 1 + #else + #define EA_NO_HAVE_WCHAR_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_MALLOC_H) && !defined(EA_NO_HAVE_MALLOC_H) + #if defined(_MSC_VER) || defined(__MINGW32__) + #define EA_HAVE_MALLOC_H 1 + #else + #define EA_NO_HAVE_MALLOC_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_ALLOCA_H) && !defined(EA_NO_HAVE_ALLOCA_H) + #if 
!defined(EA_HAVE_MALLOC_H) && !defined(EA_PLATFORM_SONY) + #define EA_HAVE_ALLOCA_H 1 + #else + #define EA_NO_HAVE_ALLOCA_H 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_EXECINFO_H) && !defined(EA_NO_HAVE_EXECINFO_H) + #if (defined(EA_PLATFORM_LINUX) || defined(EA_PLATFORM_OSX)) && !defined(EA_PLATFORM_ANDROID) + #define EA_HAVE_EXECINFO_H 1 + #else + #define EA_NO_HAVE_EXECINFO_H 1 + #endif +#endif + +// #include (Unix semaphore support) +#if !defined(EA_HAVE_SEMAPHORE_H) && !defined(EA_NO_HAVE_SEMAPHORE_H) + #if defined(EA_PLATFORM_UNIX) + #define EA_HAVE_SEMAPHORE_H 1 + #else + #define EA_NO_HAVE_SEMAPHORE_H 1 + #endif +#endif + +// #include (Unix semaphore support) +#if !defined(EA_HAVE_DIRENT_H) && !defined(EA_NO_HAVE_DIRENT_H) + #if defined(EA_PLATFORM_UNIX) && !defined(EA_PLATFORM_CONSOLE) + #define EA_HAVE_DIRENT_H 1 + #else + #define EA_NO_HAVE_DIRENT_H 1 + #endif +#endif + +// #include , , , +#if !defined(EA_HAVE_CPP11_CONTAINERS) && !defined(EA_NO_HAVE_CPP11_CONTAINERS) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_CONTAINERS 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) // Actually GCC 4.3 supports array and unordered_ + #define EA_HAVE_CPP11_CONTAINERS 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_CONTAINERS 1 + #else + #define EA_NO_HAVE_CPP11_CONTAINERS 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_ATOMIC) && !defined(EA_NO_HAVE_CPP11_ATOMIC) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_ATOMIC 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) + #define EA_HAVE_CPP11_ATOMIC 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_ATOMIC 1 + #else + #define EA_NO_HAVE_CPP11_ATOMIC 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_CONDITION_VARIABLE) && !defined(EA_NO_HAVE_CPP11_CONDITION_VARIABLE) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_CONDITION_VARIABLE 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) + #define EA_HAVE_CPP11_CONDITION_VARIABLE 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_CONDITION_VARIABLE 1 + #else + #define EA_NO_HAVE_CPP11_CONDITION_VARIABLE 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_NO_HAVE_CPP11_MUTEX) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_MUTEX 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) + #define EA_HAVE_CPP11_MUTEX 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_MUTEX 1 + #else + #define EA_NO_HAVE_CPP11_MUTEX 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_THREAD) && !defined(EA_NO_HAVE_CPP11_THREAD) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. 
VS2012+ + #define EA_HAVE_CPP11_THREAD 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) + #define EA_HAVE_CPP11_THREAD 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_THREAD 1 + #else + #define EA_NO_HAVE_CPP11_THREAD 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_FUTURE) && !defined(EA_NO_HAVE_CPP11_FUTURE) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_FUTURE 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4005) + #define EA_HAVE_CPP11_FUTURE 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_FUTURE 1 + #else + #define EA_NO_HAVE_CPP11_FUTURE 1 + #endif +#endif + + +// #include +#if !defined(EA_HAVE_CPP11_TYPE_TRAITS) && !defined(EA_NO_HAVE_CPP11_TYPE_TRAITS) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_TYPE_TRAITS 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // Prior versions of libstdc++ have incomplete support for C++11 type traits. + #define EA_HAVE_CPP11_TYPE_TRAITS 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_TYPE_TRAITS 1 + #else + #define EA_NO_HAVE_CPP11_TYPE_TRAITS 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_TUPLES) && !defined(EA_NO_HAVE_CPP11_TUPLES) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_TUPLES 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) + #define EA_HAVE_CPP11_TUPLES 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_TUPLES 1 + #else + #define EA_NO_HAVE_CPP11_TUPLES 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_REGEX) && !defined(EA_NO_HAVE_CPP11_REGEX) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) && (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_REGEX 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) + #define EA_HAVE_CPP11_REGEX 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_REGEX 1 + #else + #define EA_NO_HAVE_CPP11_REGEX 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_RANDOM) && !defined(EA_NO_HAVE_CPP11_RANDOM) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_RANDOM 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4005) + #define EA_HAVE_CPP11_RANDOM 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_RANDOM 1 + #else + #define EA_NO_HAVE_CPP11_RANDOM 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_CHRONO) && !defined(EA_NO_HAVE_CPP11_CHRONO) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. 
VS2012+ + #define EA_HAVE_CPP11_CHRONO 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // chrono was broken in glibc prior to 4.7. + #define EA_HAVE_CPP11_CHRONO 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_CHRONO 1 + #else + #define EA_NO_HAVE_CPP11_CHRONO 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_SCOPED_ALLOCATOR) && !defined(EA_NO_HAVE_CPP11_SCOPED_ALLOCATOR) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) + #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_SCOPED_ALLOCATOR 1 + #else + #define EA_NO_HAVE_CPP11_SCOPED_ALLOCATOR 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_INITIALIZER_LIST) && !defined(EA_NO_HAVE_CPP11_INITIALIZER_LIST) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_INITIALIZER_LIST 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE) + #define EA_HAVE_CPP11_INITIALIZER_LIST 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE) + #define EA_HAVE_CPP11_INITIALIZER_LIST 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) && !defined(EA_PLATFORM_APPLE) + #define EA_HAVE_CPP11_INITIALIZER_LIST 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) && !defined(EA_COMPILER_NO_INITIALIZER_LISTS) + #define EA_HAVE_CPP11_INITIALIZER_LIST 1 + #else + #define EA_NO_HAVE_CPP11_INITIALIZER_LIST 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_SYSTEM_ERROR) && !defined(EA_NO_HAVE_CPP11_SYSTEM_ERROR) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_SYSTEM_ERROR 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_CLANG) && (EA_COMPILER_VERSION >= 301) && !defined(EA_PLATFORM_APPLE) + #define EA_HAVE_CPP11_SYSTEM_ERROR 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) && !defined(EA_PLATFORM_APPLE) + #define EA_HAVE_CPP11_SYSTEM_ERROR 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_SYSTEM_ERROR 1 + #else + #define EA_NO_HAVE_CPP11_SYSTEM_ERROR 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_CODECVT) && !defined(EA_NO_HAVE_CPP11_CODECVT) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_CODECVT 1 + // Future versions of libc++ may support this header. 
However, at the moment there isn't + // a reliable way of detecting if this header is available. + //#elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008) + // #define EA_HAVE_CPP11_CODECVT 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_CODECVT 1 + #else + #define EA_NO_HAVE_CPP11_CODECVT 1 + #endif +#endif + +// #include +#if !defined(EA_HAVE_CPP11_TYPEINDEX) && !defined(EA_NO_HAVE_CPP11_TYPEINDEX) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_TYPEINDEX 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) + #define EA_HAVE_CPP11_TYPEINDEX 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_TYPEINDEX 1 + #else + #define EA_NO_HAVE_CPP11_TYPEINDEX 1 + #endif +#endif + + + + +/* EA_HAVE_XXX_DECL */ + +#if !defined(EA_HAVE_mkstemps_DECL) && !defined(EA_NO_HAVE_mkstemps_DECL) + #if defined(EA_PLATFORM_APPLE) || defined(CS_UNDEFINED_STRING) + #define EA_HAVE_mkstemps_DECL 1 + #else + #define EA_NO_HAVE_mkstemps_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_gettimeofday_DECL) && !defined(EA_NO_HAVE_gettimeofday_DECL) + #if defined(EA_PLATFORM_POSIX) /* Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). */ + #define EA_HAVE_gettimeofday_DECL 1 + #else + #define EA_NO_HAVE_gettimeofday_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_strcasecmp_DECL) && !defined(EA_NO_HAVE_strcasecmp_DECL) + #if !defined(EA_PLATFORM_MICROSOFT) + #define EA_HAVE_strcasecmp_DECL 1 /* This is found as stricmp when not found as strcasecmp */ + #define EA_HAVE_strncasecmp_DECL 1 + #else + #define EA_HAVE_stricmp_DECL 1 + #define EA_HAVE_strnicmp_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_mmap_DECL) && !defined(EA_NO_HAVE_mmap_DECL) + #if defined(EA_PLATFORM_POSIX) + #define EA_HAVE_mmap_DECL 1 /* mmap functionality varies significantly between systems. */ + #else + #define EA_NO_HAVE_mmap_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_fopen_DECL) && !defined(EA_NO_HAVE_fopen_DECL) + #define EA_HAVE_fopen_DECL 1 /* C FILE functionality such as fopen */ +#endif + +#if !defined(EA_HAVE_ISNAN) && !defined(EA_NO_HAVE_ISNAN) + #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) + #define EA_HAVE_ISNAN(x) _isnan(x) /* declared in */ + #define EA_HAVE_ISINF(x) !_finite(x) + #elif defined(EA_PLATFORM_APPLE) + #define EA_HAVE_ISNAN(x) std::isnan(x) /* declared in */ + #define EA_HAVE_ISINF(x) std::isinf(x) + #elif defined(EA_PLATFORM_ANDROID) + #define EA_HAVE_ISNAN(x) __builtin_isnan(x) /* There are a number of standard libraries for Android and it's hard to tell them apart, so just go with builtins */ + #define EA_HAVE_ISINF(x) __builtin_isinf(x) + #elif defined(__GNUC__) && defined(__CYGWIN__) + #define EA_HAVE_ISNAN(x) __isnand(x) /* declared nowhere, it seems. 
*/ + #define EA_HAVE_ISINF(x) __isinfd(x) + #else + #define EA_HAVE_ISNAN(x) std::isnan(x) /* declared in */ + #define EA_HAVE_ISINF(x) std::isinf(x) + #endif +#endif + +#if !defined(EA_HAVE_itoa_DECL) && !defined(EA_NO_HAVE_itoa_DECL) + #if defined(EA_COMPILER_MSVC) + #define EA_HAVE_itoa_DECL 1 + #else + #define EA_NO_HAVE_itoa_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_nanosleep_DECL) && !defined(EA_NO_HAVE_nanosleep_DECL) + #if (defined(EA_PLATFORM_UNIX) && !defined(EA_PLATFORM_SONY)) || defined(EA_PLATFORM_IPHONE) || defined(EA_PLATFORM_OSX) || defined(EA_PLATFORM_SONY) || defined(CS_UNDEFINED_STRING) + #define EA_HAVE_nanosleep_DECL 1 + #else + #define EA_NO_HAVE_nanosleep_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_utime_DECL) && !defined(EA_NO_HAVE_utime_DECL) + #if defined(EA_PLATFORM_MICROSOFT) + #define EA_HAVE_utime_DECL _utime + #elif EA_PLATFORM_UNIX + #define EA_HAVE_utime_DECL utime + #else + #define EA_NO_HAVE_utime_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_ftruncate_DECL) && !defined(EA_NO_HAVE_ftruncate_DECL) + #if !defined(__MINGW32__) + #define EA_HAVE_ftruncate_DECL 1 + #else + #define EA_NO_HAVE_ftruncate_DECL 1 + #endif +#endif + +#if !defined(EA_HAVE_localtime_DECL) && !defined(EA_NO_HAVE_localtime_DECL) + #define EA_HAVE_localtime_DECL 1 +#endif + +#if !defined(EA_HAVE_pthread_getattr_np_DECL) && !defined(EA_NO_HAVE_pthread_getattr_np_DECL) + #if defined(EA_PLATFORM_LINUX) + #define EA_HAVE_pthread_getattr_np_DECL 1 + #else + #define EA_NO_HAVE_pthread_getattr_np_DECL 1 + #endif +#endif + + + +/* EA_HAVE_XXX_IMPL*/ + +#if !defined(EA_HAVE_WCHAR_IMPL) && !defined(EA_NO_HAVE_WCHAR_IMPL) + #if defined(EA_PLATFORM_DESKTOP) + #define EA_HAVE_WCHAR_IMPL 1 /* Specifies if wchar_t string functions are provided, such as wcslen, wprintf, etc. Implies EA_HAVE_WCHAR_H */ + #else + #define EA_NO_HAVE_WCHAR_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_getenv_IMPL) && !defined(EA_NO_HAVE_getenv_IMPL) + #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_WINRT) + #define EA_HAVE_getenv_IMPL 1 + #else + #define EA_NO_HAVE_getenv_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_setenv_IMPL) && !defined(EA_NO_HAVE_setenv_IMPL) + #if defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_POSIX) + #define EA_HAVE_setenv_IMPL 1 + #else + #define EA_NO_HAVE_setenv_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_unsetenv_IMPL) && !defined(EA_NO_HAVE_unsetenv_IMPL) + #if defined(EA_PLATFORM_UNIX) && defined(EA_PLATFORM_POSIX) + #define EA_HAVE_unsetenv_IMPL 1 + #else + #define EA_NO_HAVE_unsetenv_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_putenv_IMPL) && !defined(EA_NO_HAVE_putenv_IMPL) + #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_WINRT) + #define EA_HAVE_putenv_IMPL 1 /* With Microsoft compilers you may need to use _putenv, as they have deprecated putenv. 
*/ + #else + #define EA_NO_HAVE_putenv_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_time_IMPL) && !defined(EA_NO_HAVE_time_IMPL) + #define EA_HAVE_time_IMPL 1 + #define EA_HAVE_clock_IMPL 1 +#endif + +// fopen() +#if !defined(EA_HAVE_fopen_IMPL) && !defined(EA_NO_HAVE_fopen_IMPL) + #define EA_HAVE_fopen_IMPL 1 /* C FILE functionality such as fopen */ +#endif + +// inet_ntop() +#if !defined(EA_HAVE_inet_ntop_IMPL) && !defined(EA_NO_HAVE_inet_ntop_IMPL) + #if (defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_POSIX)) && !defined(EA_PLATFORM_SONY) && !defined(CS_UNDEFINED_STRING) + #define EA_HAVE_inet_ntop_IMPL 1 /* This doesn't identify if the platform SDK has some alternative function that does the same thing; */ + #define EA_HAVE_inet_pton_IMPL 1 /* it identifies strictly the inet_ntop and inet_pton functions. For example, Microsoft has InetNtop in */ + #else + #define EA_NO_HAVE_inet_ntop_IMPL 1 + #define EA_NO_HAVE_inet_pton_IMPL 1 + #endif +#endif + +// clock_gettime() +#if !defined(EA_HAVE_clock_gettime_IMPL) && !defined(EA_NO_HAVE_clock_gettime_IMPL) + #if defined(EA_PLATFORM_LINUX) || defined(__CYGWIN__) || (defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)) || (defined(EA_PLATFORM_POSIX) && defined(_CPPLIB_VER) /*Dinkumware*/) + #define EA_HAVE_clock_gettime_IMPL 1 /* You need to link the 'rt' library to get this */ + #else + #define EA_NO_HAVE_clock_gettime_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_getcwd_IMPL) && !defined(EA_NO_HAVE_getcwd_IMPL) + #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_ANDROID) && !defined(EA_PLATFORM_WINRT) + #define EA_HAVE_getcwd_IMPL 1 /* With Microsoft compilers you may need to use _getcwd, as they have deprecated getcwd. And in any case it's present at */ + #else + #define EA_NO_HAVE_getcwd_IMPL 1 + #endif +#endif + +#if !defined(EA_HAVE_tmpnam_IMPL) && !defined(EA_NO_HAVE_tmpnam_IMPL) + #if (defined(EA_PLATFORM_DESKTOP) || defined(EA_PLATFORM_UNIX)) && !defined(EA_PLATFORM_ANDROID) + #define EA_HAVE_tmpnam_IMPL 1 + #else + #define EA_NO_HAVE_tmpnam_IMPL 1 + #endif +#endif + +// nullptr, the built-in C++11 type. +// This EA_HAVE is deprecated, as EA_COMPILER_NO_NULLPTR is more appropriate, given that nullptr is a compiler-level feature and not a library feature. +#if !defined(EA_HAVE_nullptr_IMPL) && !defined(EA_NO_HAVE_nullptr_IMPL) + #if defined(EA_COMPILER_NO_NULLPTR) + #define EA_NO_HAVE_nullptr_IMPL 1 + #else + #define EA_HAVE_nullptr_IMPL 1 + #endif +#endif + +// std::nullptr_t +// Note that implements a portable nullptr implementation, but this +// EA_HAVE specifically refers to std::nullptr_t from the standard libraries. +#if !defined(EA_HAVE_nullptr_t_IMPL) && !defined(EA_NO_HAVE_nullptr_t_IMPL) + #if defined(EA_COMPILER_CPP11_ENABLED) + // VS2010+ with its default Dinkumware standard library. + #if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) + #define EA_HAVE_nullptr_t_IMPL 1 + + #elif defined(EA_HAVE_LIBCPP_LIBRARY) // clang/llvm libc++ + #define EA_HAVE_nullptr_t_IMPL 1 + + #elif defined(EA_HAVE_LIBSTDCPP_LIBRARY) // GNU libstdc++ + // Unfortunately __GLIBCXX__ date values don't go strictly in version ordering. + #if (__GLIBCXX__ >= 20110325) && (__GLIBCXX__ != 20120702) && (__GLIBCXX__ != 20110428) + #define EA_HAVE_nullptr_t_IMPL 1 + #else + #define EA_NO_HAVE_nullptr_t_IMPL 1 + #endif + + // We simply assume that the standard library (e.g. Dinkumware) provides std::nullptr_t. 
+ #elif defined(__clang__) + #define EA_HAVE_nullptr_t_IMPL 1 + + // With GCC compiler >= 4.6, std::nullptr_t is always defined in , in practice. + #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) + #define EA_HAVE_nullptr_t_IMPL 1 + + // The EDG compiler provides nullptr, but uses an older standard library that doesn't support std::nullptr_t. + #elif defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) + #define EA_HAVE_nullptr_t_IMPL 1 + + #else + #define EA_NO_HAVE_nullptr_t_IMPL 1 + #endif + #else + #define EA_NO_HAVE_nullptr_t_IMPL 1 + #endif +#endif + +// std::terminate +#if !defined(EA_HAVE_std_terminate_IMPL) && !defined(EA_NO_HAVE_std_terminate_IMPL) + #if !defined(EA_PLATFORM_IPHONE) && !defined(EA_PLATFORM_ANDROID) + #define EA_HAVE_std_terminate_IMPL 1 /* iOS doesn't appear to provide an implementation for std::terminate under the armv6 target. */ + #else + #define EA_NO_HAVE_std_terminate_IMPL 1 + #endif +#endif + +// : std::begin, std::end, std::prev, std::next, std::move_iterator. +#if !defined(EA_HAVE_CPP11_ITERATOR_IMPL) && !defined(EA_NO_HAVE_CPP11_ITERATOR_IMPL) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_ITERATOR_IMPL 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006) + #define EA_HAVE_CPP11_ITERATOR_IMPL 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_ITERATOR_IMPL 1 + #else + #define EA_NO_HAVE_CPP11_ITERATOR_IMPL 1 + #endif +#endif + +// : std::weak_ptr, std::shared_ptr, std::unique_ptr, std::bad_weak_ptr, std::owner_less +#if !defined(EA_HAVE_CPP11_SMART_POINTER_IMPL) && !defined(EA_NO_HAVE_CPP11_SMART_POINTER_IMPL) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) + #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_SMART_POINTER_IMPL 1 + #else + #define EA_NO_HAVE_CPP11_SMART_POINTER_IMPL 1 + #endif +#endif + +// : std::function, std::mem_fn, std::bad_function_call, std::is_bind_expression, std::is_placeholder, std::reference_wrapper, std::hash, std::bind, std::ref, std::cref. +#if !defined(EA_HAVE_CPP11_FUNCTIONAL_IMPL) && !defined(EA_NO_HAVE_CPP11_FUNCTIONAL_IMPL) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. VS2010+ + #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) + #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_FUNCTIONAL_IMPL 1 + #else + #define EA_NO_HAVE_CPP11_FUNCTIONAL_IMPL 1 + #endif +#endif + +// std::current_exception, std::rethrow_exception, std::exception_ptr, std::make_exception_ptr +#if !defined(EA_HAVE_CPP11_EXCEPTION_IMPL) && !defined(EA_NO_HAVE_CPP11_EXCEPTION_IMPL) + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 520) && !(defined(_HAS_CPP0X) && _HAS_CPP0X) // Dinkumware. 
VS2010+ + #define EA_HAVE_CPP11_EXCEPTION_IMPL 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4004) + #define EA_HAVE_CPP11_EXCEPTION_IMPL 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EA_HAVE_CPP11_EXCEPTION_IMPL 1 + #else + #define EA_NO_HAVE_CPP11_EXCEPTION_IMPL 1 + #endif +#endif + + + + +/* Implementations that all platforms seem to have: */ +/* + alloca + malloc + calloc + strtoll + strtoull + vsprintf + vsnprintf +*/ + +/* Implementations that we don't care about: */ +/* + bcopy -- Just use memmove or some customized equivalent. bcopy offers no practical benefit. + strlcpy -- So few platforms have this built-in that we get no benefit from using it. Use EA::StdC::Strlcpy instead. + strlcat -- " +*/ + + + +/*----------------------------------------------------------------------------- + EABASE_USER_HAVE_HEADER + + This allows the user to define a header file to be #included after the + eahave.h's contents are compiled. A primary use of this is to override + the contents of this header file. You can define the overhead header + file name in-code or define it globally as part of your build file. + + Example usage: + #define EABASE_USER_HAVE_HEADER "MyHaveOverrides.h" + #include +---------------------------------------------------------------------------*/ + +#ifdef EABASE_USER_HAVE_HEADER + #include EABASE_USER_HAVE_HEADER +#endif + + +#endif /* Header include guard */ + + + diff --git a/external/EASTL/include/EABase/earesult.h b/external/EASTL/include/EABase/earesult.h new file mode 100644 index 00000000..d08b3460 --- /dev/null +++ b/external/EASTL/include/EABase/earesult.h @@ -0,0 +1,62 @@ +/*----------------------------------------------------------------------------- + * earesult.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. + *---------------------------------------------------------------------------*/ + + +#ifndef INCLUDED_earesult_H +#define INCLUDED_earesult_H + + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once /* Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. */ +#endif + + + +/* This result type is width-compatible with most systems. */ +typedef int32_t ea_result_type; + + +namespace EA +{ + typedef int32_t result_type; + + enum + { +#ifndef SUCCESS + // Deprecated + // Note: a public MS header has created a define of this name which causes a build error. Fortunately they + // define it to 0 which is compatible. + // see: WindowsSDK\8.1.51641-fb\installed\Include\um\RasError.h + SUCCESS = 0, +#endif + // Deprecated + FAILURE = -1, + + // These values are now the preferred constants + EA_SUCCESS = 0, + EA_FAILURE = -1, + }; +} + + +/* Macro to simplify testing for success. */ +#ifndef EA_SUCCEEDED + #define EA_SUCCEEDED(result) ((result) >= 0) +#endif + +/* Macro to simplfify testing for general failure. */ +#ifndef EA_FAILED + #define EA_FAILED(result) ((result) < 0) +#endif + + +#endif + + + + diff --git a/external/EASTL/include/EABase/eastdarg.h b/external/EASTL/include/EABase/eastdarg.h new file mode 100644 index 00000000..2c613eb8 --- /dev/null +++ b/external/EASTL/include/EABase/eastdarg.h @@ -0,0 +1,99 @@ +/*----------------------------------------------------------------------------- + * eastdarg.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. 
+ *---------------------------------------------------------------------------*/ + + +#ifndef INCLUDED_eastdarg_H +#define INCLUDED_eastdarg_H + + +#include +#include + + +// VA_ARG_COUNT +// +// Returns the number of arguments passed to a macro's ... argument. +// This applies to macros only and not functions. +// +// Example usage: +// assert(VA_ARG_COUNT() == 0); +// assert(VA_ARG_COUNT(a) == 1); +// assert(VA_ARG_COUNT(a, b) == 2); +// assert(VA_ARG_COUNT(a, b, c) == 3); +// +#if !defined(VA_ARG_COUNT) + #define VA_ARG_COUNT(...) VA_ARG_COUNT_II((VA_ARG_COUNT_PREFIX_ ## __VA_ARGS__ ## _VA_ARG_COUNT_POSTFIX,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) + #define VA_ARG_COUNT_II(__args) VA_ARG_COUNT_I __args + #define VA_ARG_COUNT_PREFIX__VA_ARG_COUNT_POSTFIX ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,0 + #define VA_ARG_COUNT_I(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,N,...) N +#endif + + +// va_copy +// +// va_copy is required by C++11 +// C++11 and C99 require va_copy to be #defined and implemented. +// http://en.cppreference.com/w/cpp/utility/variadic/va_copy +// +// Example usage: +// void Func(char* p, ...){ +// va_list args, argsCopy; +// va_start(args, p); +// va_copy(argsCopy, args); +// (use args) +// (use argsCopy, which acts the same as args) +// va_end(args); +// va_end(argsCopy); +// } +// +#ifndef va_copy + #if defined(__va_copy) // GCC and others define this for non-C99 compatibility. + #define va_copy(dest, src) __va_copy((dest), (src)) + #else + // This may not work for some platforms, depending on their ABI. + // It works for Microsoft x86,x64, and PowerPC-based platforms. + #define va_copy(dest, src) memcpy(&(dest), &(src), sizeof(va_list)) + #endif +#endif + + + +// va_list_reference +// +// va_list_reference is not part of the C or C++ standards. +// It allows you to pass a va_list by reference to another +// function instead of by value. You cannot simply use va_list& +// as that won't work with many va_list implementations because +// they are implemented as arrays (which can't be passed by +// reference to a function without decaying to a pointer). +// +// Example usage: +// void Test(va_list_reference args){ +// printf("%d", va_arg(args, int)); +// } +// void Func(char* p, ...){ +// va_list args; +// va_start(args, p); +// Test(args); // Upon return args will be modified. +// va_end(args); +// } +#ifndef va_list_reference + #if defined(EA_PLATFORM_MICROSOFT) || (EA_PLATFORM_PTR_SIZE == 4) || (defined(EA_PLATFORM_APPLE) && defined(EA_PROCESSOR_ARM64)) || defined(CS_UNDEFINED_STRING) || (defined(EA_PLATFORM_ANDROID) && defined(EA_PROCESSOR_ARM64)) + // This is required for platform ABIs in which va_list is a struct or pointer. + #define va_list_reference va_list& + #else + // This is required for platform ABIs in which va_list is defined to be an array. + #define va_list_reference va_list + #endif +#endif + + + + +#endif /* Header include guard */ + + + diff --git a/external/EASTL/include/EABase/eaunits.h b/external/EASTL/include/EABase/eaunits.h new file mode 100644 index 00000000..22357234 --- /dev/null +++ b/external/EASTL/include/EABase/eaunits.h @@ -0,0 +1,54 @@ +/*----------------------------------------------------------------------------- + * eaunits.h + * + * Copyright (c) Electronic Arts Inc. All rights reserved. 
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eaunits_h
+#define INCLUDED_eaunits_h
+
+#include <EABase/eabase.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+// Defining common SI unit macros.
+//
+// The mebibyte is a multiple of the unit byte for digital information. Technically a
+// megabyte (MB) is a power of ten, while a mebibyte (MiB) is a power of two,
+// appropriate for binary machines. Many Linux distributions use the unit, but it is
+// not widely acknowledged within the industry or media.
+// Reference: https://en.wikipedia.org/wiki/Mebibyte
+//
+// Examples:
+//     auto size1 = EA_KILOBYTE(16);
+//     auto size2 = EA_MEGABYTE(128);
+//     auto size3 = EA_MEBIBYTE(8);
+//     auto size4 = EA_GIBIBYTE(8);
+
+// define byte for completeness
+#define EA_BYTE(x) (x)
+
+// Decimal SI units
+#define EA_KILOBYTE(x) (size_t(x) * 1000)
+#define EA_MEGABYTE(x) (size_t(x) * 1000 * 1000)
+#define EA_GIGABYTE(x) (size_t(x) * 1000 * 1000 * 1000)
+#define EA_TERABYTE(x) (size_t(x) * 1000 * 1000 * 1000 * 1000)
+#define EA_PETABYTE(x) (size_t(x) * 1000 * 1000 * 1000 * 1000 * 1000)
+#define EA_EXABYTE(x)  (size_t(x) * 1000 * 1000 * 1000 * 1000 * 1000 * 1000)
+
+// Binary (IEC) units
+#define EA_KIBIBYTE(x) (size_t(x) * 1024)
+#define EA_MEBIBYTE(x) (size_t(x) * 1024 * 1024)
+#define EA_GIBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024)
+#define EA_TEBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024)
+#define EA_PEBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024 * 1024)
+#define EA_EXBIBYTE(x) (size_t(x) * 1024 * 1024 * 1024 * 1024 * 1024 * 1024)
+
+#endif // INCLUDED_eaunits_h
+
+
+
+
diff --git a/external/EASTL/include/EABase/int128.h b/external/EASTL/include/EABase/int128.h
new file mode 100644
index 00000000..068d557a
--- /dev/null
+++ b/external/EASTL/include/EABase/int128.h
@@ -0,0 +1,1268 @@
+/*-----------------------------------------------------------------------------
+ * eaint128_t.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/ + + +#ifndef INCLUDED_int128_h +#define INCLUDED_int128_h + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// EA_INT128_INTRINSIC_AVAILABLE +// +#if (EA_COMPILER_INTMAX_SIZE >= 16) && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + // __int128_t/__uint128_t is supported + #define EA_INT128_INTRINSIC_AVAILABLE 1 +#else + #define EA_INT128_INTRINSIC_AVAILABLE 0 +#endif + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// EA_INT128_ALIGNAS +// +#if EA_INT128_INTRINSIC_AVAILABLE && !defined(EA_COMPILER_NO_ALIGNAS) + #define EA_INT128_ALIGNAS alignas(unsigned __int128) +#else + #define EA_INT128_ALIGNAS +#endif + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// EA_HAVE_INT128 +// +// Indicates that EABase implements 128-bit integer types +// +#define EA_HAVE_INT128 1 + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// uint128_t_base +// +struct EA_INT128_ALIGNAS int128_t_base +{ + // Constructors / destructors + int128_t_base() = default; + int128_t_base(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3); + int128_t_base(uint64_t nPart0, uint64_t nPart1); + int128_t_base(uint8_t value); + int128_t_base(uint16_t value); + int128_t_base(uint32_t value); + int128_t_base(uint64_t value); + int128_t_base(const int128_t_base& value) = default; + + // Assignment operator + int128_t_base& operator=(const int128_t_base& value) = default; + + // Explicit operators to convert back to basic types + EA_CONSTEXPR explicit operator bool() const; + EA_CONSTEXPR explicit operator char() const; + EA_CONSTEXPR explicit operator int() const; + EA_CONSTEXPR explicit operator long() const; + EA_CONSTEXPR explicit operator long long() const; + EA_CONSTEXPR explicit operator short() const; + EA_CONSTEXPR explicit operator signed char() const; + EA_CONSTEXPR explicit operator unsigned char() const; + EA_CONSTEXPR explicit operator unsigned int() const; + EA_CONSTEXPR explicit operator unsigned long long() const; + EA_CONSTEXPR explicit operator unsigned long() const; + EA_CONSTEXPR explicit operator unsigned short() const; +#if EA_WCHAR_UNIQUE + // EA_CONSTEXPR explicit operator char16_t() const; + // EA_CONSTEXPR explicit operator char32_t() const; + // EA_CONSTEXPR explicit operator wchar_t() const; +#endif + EA_CONSTEXPR explicit operator float() const; + EA_CONSTEXPR explicit operator double() const; + EA_CONSTEXPR explicit operator long double() const; +#if EA_INT128_INTRINSIC_AVAILABLE + EA_CONSTEXPR explicit operator __int128() const; + EA_CONSTEXPR explicit operator unsigned __int128() const; +#endif + + // Math operators + static void OperatorPlus (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result); + static void OperatorMinus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result); + static void OperatorMul (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result); + + // Shift operators + static void OperatorShiftRight(const int128_t_base& value, int nShift, int128_t_base& result); + static void OperatorShiftLeft (const int128_t_base& value, int nShift, int128_t_base& result); + + // Unary arithmetic/logic operators + bool operator!() const; + + // Logical operators + static void 
OperatorXOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+	static void OperatorOR (const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+	static void OperatorAND(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result);
+
+	bool IsZero() const;
+	void SetZero();
+	void TwosComplement();
+	void InverseTwosComplement();
+
+	int  GetBit(int nIndex) const;
+	void SetBit(int nIndex, int value);
+
+protected:
+	void DoubleToUint128(double value);
+
+	EA_CONSTEXPR uint64_t Low() const
+	{
+		return mPart0;
+	}
+
+	EA_CONSTEXPR uint64_t High() const
+	{
+		return mPart1;
+	}
+
+protected:
+	#ifdef EA_SYSTEM_BIG_ENDIAN
+		uint64_t mPart1; // Most significant 64 bits.
+		uint64_t mPart0; // Least significant 64 bits.
+	#else
+		uint64_t mPart0; // Least significant 64 bits.
+		uint64_t mPart1; // Most significant 64 bits.
+	#endif
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// int128_t
+//
+// Implements a signed 128 bit integer.
+//
+struct int128_t : public int128_t_base
+{
+	// Constructors / destructors
+	using int128_t_base::int128_t_base;
+
+	// Assignment operator
+	using int128_t_base::operator=;
+
+	// Unary arithmetic/logic operators
+	int128_t  operator-() const;
+	int128_t& operator++();
+	int128_t& operator--();
+	int128_t  operator++(int);
+	int128_t  operator--(int);
+	int128_t  operator~() const;
+	int128_t  operator+() const;
+
+	// Math operators
+	int128_t  operator+ (const int128_t& other);
+	int128_t  operator- (const int128_t& other);
+	int128_t  operator* (const int128_t& other);
+	int128_t  operator/ (const int128_t& other);
+	int128_t  operator% (const int128_t& other);
+	int128_t& operator+=(const int128_t& other);
+	int128_t& operator-=(const int128_t& other);
+	int128_t& operator*=(const int128_t& other);
+	int128_t& operator/=(const int128_t& other);
+	int128_t& operator%=(const int128_t& other);
+
+	// Shift operators
+	int128_t  operator>> (int nShift) const;
+	int128_t  operator<< (int nShift) const;
+	int128_t& operator>>=(int nShift);
+	int128_t& operator<<=(int nShift);
+
+	// Logical operators
+	int128_t  operator^ (const int128_t& other) const;
+	int128_t  operator| (const int128_t& other) const;
+	int128_t  operator& (const int128_t& other) const;
+	int128_t& operator^=(const int128_t& other);
+	int128_t& operator|=(const int128_t& other);
+	int128_t& operator&=(const int128_t& other);
+
+	// Equality operators
+	bool operator==(const int128_t& other) const;
+	bool operator!=(const int128_t& other) const;
+	bool operator> (const int128_t& other) const;
+	bool operator>=(const int128_t& other) const;
+	bool operator< (const int128_t& other) const;
+	bool operator<=(const int128_t& other) const;
+
+protected:
+	int  compare(const int128_t& other) const;
+	void Negate();
+	void Modulus(const int128_t& divisor, int128_t& quotient, int128_t& remainder) const;
+	bool IsNegative() const; // Returns true for value <  0
+	bool IsPositive() const; // Returns true for value >= 0
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// uint128_t
+//
+// Implements an unsigned 128 bit integer.
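+//
+// Example usage (an illustrative sketch; the variable names and values below are
+// arbitrary and are not part of the original header):
+//     uint128_t a((uint32_t)10);
+//     uint128_t b((uint32_t)3);
+//     uint128_t q = a / b;   // 3; division runs the bitwise long division in Modulus()
+//     uint128_t r = a % b;   // 1
+//     bool      lt = r < b;  // true; comparisons reduce to an internal three-way compare()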
+// +struct uint128_t : public int128_t_base +{ + // Constructors / destructors + using int128_t_base::int128_t_base; + + // Assignment operator + using int128_t_base::operator=; + + // Unary arithmetic/logic operators + uint128_t operator-() const; + uint128_t& operator++(); + uint128_t& operator--(); + uint128_t operator++(int); + uint128_t operator--(int); + uint128_t operator~() const; + uint128_t operator+() const; + + // Math operators + uint128_t operator+ (const uint128_t& other); + uint128_t operator- (const uint128_t& other); + uint128_t operator* (const uint128_t& other); + uint128_t operator/ (const uint128_t& other); + uint128_t operator% (const uint128_t& other); + uint128_t& operator+=(const uint128_t& other); + uint128_t& operator-=(const uint128_t& other); + uint128_t& operator*=(const uint128_t& other); + uint128_t& operator/=(const uint128_t& other); + uint128_t& operator%=(const uint128_t& other); + + // Shift operators + uint128_t operator>> (int nShift) const; + uint128_t operator<< (int nShift) const; + uint128_t& operator>>=(int nShift); + uint128_t& operator<<=(int nShift); + + // Logical operators + uint128_t operator^ (const uint128_t& other) const; + uint128_t operator| (const uint128_t& other) const; + uint128_t operator& (const uint128_t& other) const; + uint128_t& operator^=(const uint128_t& other); + uint128_t& operator|=(const uint128_t& other); + uint128_t& operator&=(const uint128_t& other); + + // Equality operators + bool operator==(const uint128_t& other) const; + bool operator!=(const uint128_t& other) const; + bool operator> (const uint128_t& other) const; + bool operator>=(const uint128_t& other) const; + bool operator< (const uint128_t& other) const; + bool operator<=(const uint128_t& other) const; + +protected: + int compare(const uint128_t& other) const; + void Negate(); + void Modulus(const uint128_t& divisor, uint128_t& quotient, uint128_t& remainder) const; + bool IsNegative() const; // Returns true for value < 0 + bool IsPositive() const; // Returns true for value >= 0 +}; + + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// uint128_t_base implementation +/////////////////////////////////////////////////////////////////////////////////////////////////////// +EA_CONSTEXPR inline int128_t_base::operator bool() const { return mPart0 || mPart1; } +EA_CONSTEXPR inline int128_t_base::operator char() const { return static_cast(Low()); } +#if EA_WCHAR_UNIQUE +// EA_CONSTEXPR inline int128_t_base::operator char16_t() const { return static_cast(Low()); } +// EA_CONSTEXPR inline int128_t_base::operator char32_t() const { return static_cast(Low()); } +// EA_CONSTEXPR inline int128_t_base::operator wchar_t() const { return static_cast(Low()); } +#endif +EA_CONSTEXPR inline int128_t_base::operator int() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator long() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator long long() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator short() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator signed char() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator unsigned char() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator unsigned int() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator unsigned long long() const { return static_cast(Low()); } 
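+// Note (an illustrative aside, not from the original sources): all of the explicit
+// integral and floating-point conversions here truncate to the low 64 bits (Low());
+// operator bool() is the exception, as it tests both 64-bit words. For example:
+//     uint128_t v((uint64_t)0, (uint64_t)1);  // the value 2^64
+//     uint64_t  n = (uint64_t)v;              // 0, because only the low word survives
+//     bool      b = (bool)v;                  // true, because either word being nonzero suffices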
+EA_CONSTEXPR inline int128_t_base::operator unsigned long() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator unsigned short() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator float() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator double() const { return static_cast(Low()); } +EA_CONSTEXPR inline int128_t_base::operator long double() const { return static_cast(Low()); } +#if EA_INT128_INTRINSIC_AVAILABLE +EA_CONSTEXPR inline int128_t_base::operator __int128() const { return static_cast<__int128>(Low()); } +EA_CONSTEXPR inline int128_t_base::operator unsigned __int128() const { return static_cast(Low()); } +#endif + +inline void int128_t_base::SetBit(int nIndex, int value) +{ + // EA_ASSERT((nIndex >= 0) && (nIndex < 128)); + + const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64)); + + if(nIndex < 64) + { + if(value) + mPart0 = mPart0 | nBitMask; + else + mPart0 = mPart0 & ~nBitMask; + } + else if(nIndex < 128) + { + if(value) + mPart1 = mPart1 | nBitMask; + else + mPart1 = mPart1 & ~nBitMask; + } +} + +inline int int128_t_base::GetBit(int nIndex) const +{ + // EA_ASSERT((nIndex >= 0) && (nIndex < 128)); + + const uint64_t nBitMask = ((uint64_t)1 << (nIndex % 64)); + + if(nIndex < 64) + return ((mPart0 & nBitMask) ? 1 : 0); + else if(nIndex < 128) + return ((mPart1 & nBitMask) ? 1 : 0); + return 0; +} + +inline int128_t_base::int128_t_base(uint32_t nPart0, uint32_t nPart1, uint32_t nPart2, uint32_t nPart3) +{ + mPart1 = ((uint64_t)nPart3 << 32) + nPart2; + mPart0 = ((uint64_t)nPart1 << 32) + nPart0; +} + +inline int128_t_base::int128_t_base(uint64_t nPart0, uint64_t nPart1) +{ + mPart1 = nPart1; + mPart0 = nPart0; +} + +inline int128_t_base::int128_t_base(uint8_t value) +{ + mPart1 = 0; + mPart0 = value; +} + +inline int128_t_base::int128_t_base(uint16_t value) +{ + mPart1 = 0; + mPart0 = value; +} + +inline int128_t_base::int128_t_base(uint32_t value) +{ + mPart1 = 0; + mPart0 = value; +} + +inline int128_t_base::int128_t_base(uint64_t value) +{ + mPart1 = 0; + mPart0 = value; +} + +/////////////////////////////////////////////////////////////////////////////// +// OperatorPlus +// +// Returns: (value1 + value2) into result. +// The output 'result' *is* allowed to point to the same memory as one of the inputs. +// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound. +// +inline void int128_t_base::OperatorPlus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result) +{ + uint64_t t = value1.mPart0 + value2.mPart0; + uint64_t nCarry = (t < value1.mPart0) && (t < value2.mPart0); + result.mPart0 = t; + result.mPart1 = value1.mPart1 + value2.mPart1 + nCarry; +} + +/////////////////////////////////////////////////////////////////////////////// +// OperatorMinus +// +// Returns: (value1 - value2) into result. +// The output 'result' *is* allowed to point to the same memory as one of the inputs. +// To consider: Fix 'defect' of this function whereby it doesn't implement overflow wraparound. +// +inline void int128_t_base::OperatorMinus(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result) +{ + uint64_t t = (value1.mPart0 - value2.mPart0); + uint64_t nCarry = (value1.mPart0 < value2.mPart0) ? 
1u : 0u; + result.mPart0 = t; + result.mPart1 = (value1.mPart1 - value2.mPart1) - nCarry; +} + +/////////////////////////////////////////////////////////////////////////////// +// OperatorMul +// +// 64 bit systems: +// This is how it would be able to work if we could get a 128 bit result from +// two 64 bit values. None of the 64 bit systems that we are currently working +// with have C language support for multiplying two 64 bit numbers and retrieving +// the 128 bit result. However, many 64 bit platforms have support at the asm +// level for doing such a thing. +// Part 1 Part 0 +// 0000000000000002 0000000000000001 +// x 0000000000000002 0000000000000001 +// ------------------------------------------- +// | 0000000000000002 0000000000000001 +// + 0000000000000004 | 0000000000000002 (0000000000000000) +// ------------------------------------------------------------------------- +// +inline void int128_t_base::OperatorMul(const int128_t_base& a, const int128_t_base& b, int128_t_base& result) +{ + // To consider: Use compiler or OS-provided custom functionality here, such as + // Windows UnsignedMultiply128 and GCC's built-in int128_t. + + #if defined(DISABLED_PLATFORM_WIN64) + // To do: Implement x86-64 asm here. + + #else + // Else we are stuck doing something less efficient. In this case we + // fall back to doing 32 bit multiplies as with 32 bit platforms. + result = (a.mPart0 & 0xffffffff) * (b.mPart0 & 0xffffffff); + int128_t v01 = (a.mPart0 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff); + int128_t v02 = (a.mPart0 & 0xffffffff) * (b.mPart1 & 0xffffffff); + int128_t v03 = (a.mPart0 & 0xffffffff) * ((b.mPart1 >> 32) & 0xffffffff); + + int128_t v10 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff); + int128_t v11 = ((a.mPart0 >> 32) & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff); + int128_t v12 = ((a.mPart0 >> 32) & 0xffffffff) * (b.mPart1 & 0xffffffff); + + int128_t v20 = (a.mPart1 & 0xffffffff) * (b.mPart0 & 0xffffffff); + int128_t v21 = (a.mPart1 & 0xffffffff) * ((b.mPart0 >> 32) & 0xffffffff); + + int128_t v30 = ((a.mPart1 >> 32) & 0xffffffff) * (b.mPart0 & 0xffffffff); + + // Do row addition, shifting as needed. + OperatorPlus(result, v01 << 32, result); + OperatorPlus(result, v02 << 64, result); + OperatorPlus(result, v03 << 96, result); + + OperatorPlus(result, v10 << 32, result); + OperatorPlus(result, v11 << 64, result); + OperatorPlus(result, v12 << 96, result); + + OperatorPlus(result, v20 << 64, result); + OperatorPlus(result, v21 << 96, result); + + OperatorPlus(result, v30 << 96, result); + #endif +} + +/////////////////////////////////////////////////////////////////////////////// +// OperatorShiftRight +// +// Returns: value >> nShift into result +// The output 'result' may *not* be the same as one the input. +// With rightward shifts of negative numbers, shift in zero from the left side. 
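+//
+// For illustration (not from the original sources), shifts move bits across the
+// two 64-bit words, and negative shift counts delegate to the opposite direction
+// via the operator>> / operator<< wrappers further below:
+//     uint128_t v((uint64_t)0, (uint64_t)1);  // the value 2^64
+//     uint128_t a = v >> 64;                  // 1: the high word moves entirely into the low word
+//     uint128_t b = v >> -64;                 // handled as v << 64 per the negative-shift branch below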
+//
+inline void int128_t_base::OperatorShiftRight(const int128_t_base& value, int nShift, int128_t_base& result)
+{
+	if(nShift >= 0)
+	{
+		if(nShift < 64)
+		{   // 0 - 63
+			result.mPart1 = (value.mPart1 >> nShift);
+
+			if(nShift == 0)
+				result.mPart0 = (value.mPart0 >> nShift);
+			else
+				result.mPart0 = (value.mPart0 >> nShift) | (value.mPart1 << (64 - nShift));
+		}
+		else
+		{   // 64+
+			result.mPart1 = 0;
+			result.mPart0 = (value.mPart1 >> (nShift - 64));
+		}
+	}
+	else // (nShift < 0)
+		OperatorShiftLeft(value, -nShift, result);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorShiftLeft
+//
+// Returns: value << nShift into result
+// The output 'result' may *not* be the same as one of the inputs.
+// Negative shift counts are delegated to OperatorShiftRight.
+//
+inline void int128_t_base::OperatorShiftLeft(const int128_t_base& value, int nShift, int128_t_base& result)
+{
+	if(nShift >= 0)
+	{
+		if(nShift < 64)
+		{
+			if(nShift) // We need to have a special case because CPUs convert a shift by 64 to a no-op.
+			{
+				// 1 - 63
+				result.mPart0 = (value.mPart0 << nShift);
+				result.mPart1 = (value.mPart1 << nShift) | (value.mPart0 >> (64 - nShift));
+			}
+			else
+			{
+				result.mPart0 = value.mPart0;
+				result.mPart1 = value.mPart1;
+			}
+		}
+		else
+		{   // 64+
+			result.mPart0 = 0;
+			result.mPart1 = (value.mPart0 << (nShift - 64));
+		}
+	}
+	else // (nShift < 0)
+		OperatorShiftRight(value, -nShift, result);
+}
+
+
+inline bool int128_t_base::operator!() const
+{
+	return (mPart0 == 0) && (mPart1 == 0);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorXOR
+//
+// Returns: value1 ^ value2 into result
+// The output 'result' may be the same as one of the inputs.
+//
+inline void int128_t_base::OperatorXOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+	result.mPart0 = (value1.mPart0 ^ value2.mPart0);
+	result.mPart1 = (value1.mPart1 ^ value2.mPart1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorOR
+//
+// Returns: value1 | value2 into result
+// The output 'result' may be the same as one of the inputs.
+//
+inline void int128_t_base::OperatorOR(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+	result.mPart0 = (value1.mPart0 | value2.mPart0);
+	result.mPart1 = (value1.mPart1 | value2.mPart1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// OperatorAND
+//
+// Returns: value1 & value2 into result
+// The output 'result' may be the same as one of the inputs.
+//
+inline void int128_t_base::OperatorAND(const int128_t_base& value1, const int128_t_base& value2, int128_t_base& result)
+{
+	result.mPart0 = (value1.mPart0 & value2.mPart0);
+	result.mPart1 = (value1.mPart1 & value2.mPart1);
+}
+
+
+inline bool int128_t_base::IsZero() const
+{
+	return (mPart0 == 0) && // Check mPart0 first as this will likely yield faster execution.
+ (mPart1 == 0); +} + + +inline void int128_t_base::SetZero() +{ + mPart1 = 0; + mPart0 = 0; +} + + +inline void int128_t_base::TwosComplement() +{ + mPart1 = ~mPart1; + mPart0 = ~mPart0; + + // What we want to do, but isn't available at this level: + // operator++(); + // Alternative: + int128_t_base one((uint32_t)1); + OperatorPlus(*this, one, *this); +} + + +inline void int128_t_base::InverseTwosComplement() +{ + // What we want to do, but isn't available at this level: + // operator--(); + // Alternative: + int128_t_base one((uint32_t)1); + OperatorMinus(*this, one, *this); + + mPart1 = ~mPart1; + mPart0 = ~mPart0; +} + + +inline void int128_t_base::DoubleToUint128(double value) +{ + // Currently this function is limited to 64 bits of integer input. + // We need to make a better version of this function. Perhaps we should implement + // it via dissecting the IEEE floating point format (sign, exponent, matissa). + // EA_ASSERT(fabs(value) < 18446744073709551616.0); // Assert that the input is <= 64 bits of integer. + + mPart1 = 0; + mPart0 = (value >= 0 ? (uint64_t)value : (uint64_t)-value); +} + + + + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// uint128_t implementation +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +inline uint128_t uint128_t::operator^(const uint128_t& other) const +{ + uint128_t temp; + uint128_t::OperatorXOR(*this, other, temp); + return temp; +} + +inline uint128_t uint128_t::operator|(const uint128_t& other) const +{ + uint128_t temp; + uint128_t::OperatorOR(*this, other, temp); + return temp; +} + +inline uint128_t uint128_t::operator&(const uint128_t& other) const +{ + uint128_t temp; + uint128_t::OperatorAND(*this, other, temp); + return temp; +} + +inline uint128_t& uint128_t::operator^=(const uint128_t& value) +{ + OperatorXOR(*this, value, *this); + return *this; +} + +inline uint128_t& uint128_t::operator|=(const uint128_t& value) +{ + OperatorOR(*this, value, *this); + return *this; +} + +inline uint128_t& uint128_t::operator&=(const uint128_t& value) +{ + OperatorAND(*this, value, *this); + return *this; +} + +// With rightward shifts of negative numbers, shift in zero from the left side. +inline uint128_t uint128_t::operator>>(int nShift) const +{ + uint128_t temp; + OperatorShiftRight(*this, nShift, temp); + return temp; +} + +// With rightward shifts of negative numbers, shift in zero from the left side. 
+inline uint128_t uint128_t::operator<<(int nShift) const +{ + uint128_t temp; + OperatorShiftLeft(*this, nShift, temp); + return temp; +} + +inline uint128_t& uint128_t::operator>>=(int nShift) +{ + uint128_t temp; + OperatorShiftRight(*this, nShift, temp); + *this = temp; + return *this; +} + +inline uint128_t& uint128_t::operator<<=(int nShift) +{ + uint128_t temp; + OperatorShiftLeft(*this, nShift, temp); + *this = temp; + return *this; +} + +inline uint128_t& uint128_t::operator+=(const uint128_t& value) +{ + OperatorPlus(*this, value, *this); + return *this; +} + +inline uint128_t& uint128_t::operator-=(const uint128_t& value) +{ + OperatorMinus(*this, value, *this); + return *this; +} + +inline uint128_t& uint128_t::operator*=(const uint128_t& value) +{ + *this = *this * value; + return *this; +} + +inline uint128_t& uint128_t::operator/=(const uint128_t& value) +{ + *this = *this / value; + return *this; +} + +inline uint128_t& uint128_t::operator%=(const uint128_t& value) +{ + *this = *this % value; + return *this; +} + +inline uint128_t uint128_t::operator+(const uint128_t& other) +{ + uint128_t temp; + uint128_t::OperatorPlus(*this, other, temp); + return temp; +} + +inline uint128_t uint128_t::operator-(const uint128_t& other) +{ + uint128_t temp; + uint128_t::OperatorMinus(*this, other, temp); + return temp; +} + +inline uint128_t uint128_t::operator*(const uint128_t& other) +{ + uint128_t returnValue; + int128_t_base::OperatorMul(*this, other, returnValue); + return returnValue; +} + +inline uint128_t uint128_t::operator/(const uint128_t& other) +{ + uint128_t remainder; + uint128_t quotient; + this->Modulus(other, quotient, remainder); + return quotient; +} + +inline uint128_t uint128_t::operator%(const uint128_t& other) +{ + uint128_t remainder; + uint128_t quotient; + this->Modulus(other, quotient, remainder); + return remainder; +} + +inline uint128_t uint128_t::operator+() const +{ + return *this; +} + +inline uint128_t uint128_t::operator~() const +{ + return uint128_t(~mPart0, ~mPart1); +} + +inline uint128_t& uint128_t::operator--() +{ + int128_t_base one((uint32_t)1); + OperatorMinus(*this, one, *this); + return *this; +} + +inline uint128_t uint128_t::operator--(int) +{ + uint128_t temp((uint32_t)1); + OperatorMinus(*this, temp, temp); + return temp; +} + +inline uint128_t uint128_t::operator++(int) +{ + uint128_t prev = *this; + uint128_t temp((uint32_t)1); + OperatorPlus(*this, temp, *this); + return prev; +} + +inline uint128_t& uint128_t::operator++() +{ + int128_t_base one((uint32_t)1); + OperatorPlus(*this, one, *this); + return *this; +} + +inline void uint128_t::Negate() +{ + TwosComplement(); +} + +inline uint128_t uint128_t::operator-() const +{ + uint128_t returnValue(*this); + returnValue.Negate(); + return returnValue; +} + +// This function forms the basis of all logical comparison functions. +// If value1 < value2, the return value is -1. +// If value1 == value2, the return value is 0. +// If value1 > value2, the return value is 1. +inline int uint128_t::compare(const uint128_t& other) const +{ + // Compare individual parts. At this point, the two numbers have the same sign. + if(mPart1 == other.mPart1) + { + if(mPart0 == other.mPart0) + return 0; + else if(mPart0 > other.mPart0) + return 1; + // return -1; //Just fall through to the end. 
+ } + else if(mPart1 > other.mPart1) + return 1; + return -1; +} + +EA_DISABLE_VC_WARNING(4723) // warning C4723: potential divide by 0 +inline void uint128_t::Modulus(const uint128_t& divisor, uint128_t& quotient, uint128_t& remainder) const +{ + uint128_t tempDividend(*this); + uint128_t tempDivisor(divisor); + + if(tempDivisor.IsZero()) + { + // Force a divide by zero exception. + // We know that tempDivisor.mPart0 is zero. + quotient.mPart0 /= tempDivisor.mPart0; + } + else if(tempDividend.IsZero()) + { + quotient = uint128_t((uint32_t)0); + remainder = uint128_t((uint32_t)0); + } + else + { + remainder.SetZero(); + + for(int i(0); i < 128; i++) + { + remainder += (uint32_t)tempDividend.GetBit(127 - i); + const bool bBit(remainder >= tempDivisor); + quotient.SetBit(127 - i, bBit); + + if(bBit) + remainder -= tempDivisor; + + if((i != 127) && !remainder.IsZero()) + remainder <<= 1; + } + } +} +EA_RESTORE_VC_WARNING() + +inline bool uint128_t::operator==(const uint128_t& other) const +{ + return (mPart0 == other.mPart0) && // Check mPart0 first as this will likely yield faster execution. + (mPart1 == other.mPart1); +} + +inline bool uint128_t::operator< (const uint128_t& other) const { return (compare(other) < 0); } +inline bool uint128_t::operator!=(const uint128_t& other) const { return !(*this == other); } +inline bool uint128_t::operator> (const uint128_t& other) const { return other < *this; } +inline bool uint128_t::operator>=(const uint128_t& other) const { return !(*this < other); } +inline bool uint128_t::operator<=(const uint128_t& other) const { return !(other < *this); } + +inline bool uint128_t::IsNegative() const +{ // True if value < 0 + return false; +} + +inline bool uint128_t::IsPositive() const +{ + // True of value >= 0 + return true; +} + + + + + + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +// int128_t implementation +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +inline void int128_t::Negate() +{ + if (IsPositive()) + TwosComplement(); + else + InverseTwosComplement(); +} + +inline int128_t int128_t::operator-() const +{ + int128_t returnValue(*this); + returnValue.Negate(); + return returnValue; +} + +inline int128_t& int128_t::operator++() +{ + int128_t_base one((uint32_t)1); + OperatorPlus(*this, one, *this); + return *this; +} + +inline int128_t& int128_t::operator--() +{ + int128_t_base one((uint32_t)1); + OperatorMinus(*this, one, *this); + return *this; +} + +inline int128_t int128_t::operator++(int) +{ + int128_t prev = *this; + int128_t temp((uint32_t)1); + OperatorPlus(*this, temp, *this); + return prev; +} + +inline int128_t int128_t::operator--(int) +{ + int128_t temp((uint32_t)1); + OperatorMinus(*this, temp, temp); + return temp; +} + +inline int128_t int128_t::operator+() const +{ + return *this; +} + +inline int128_t int128_t::operator~() const +{ + return int128_t(~mPart0, ~mPart1); +} + +inline int128_t int128_t::operator+(const int128_t& other) +{ + int128_t temp; + int128_t::OperatorPlus(*this, other, temp); + return temp; +} + +inline int128_t int128_t::operator-(const int128_t& other) +{ + int128_t temp; + int128_t::OperatorMinus(*this, other, temp); + return temp; +} + +// This function forms the basis of all logical comparison functions. +// If value1 < value2, the return value is -1. +// If value1 == value2, the return value is 0. +// If value1 > value2, the return value is 1. 
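+//
+// For illustration (not part of the original header), the relational operators
+// below are thin wrappers over this three-way result, e.g.:
+//     int128_t a((uint32_t)1);
+//     int128_t b = -a;
+//     bool lt = (b < a);  // true: implemented as (b.compare(a) < 0)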
+inline int int128_t::compare(const int128_t& other) const +{ + // Cache some values. Positive means >= 0. Negative means < 0 and thus means '!positive'. + const bool bValue1IsPositive( IsPositive()); + const bool bValue2IsPositive(other.IsPositive()); + + // Do positive/negative tests. + if(bValue1IsPositive != bValue2IsPositive) + return bValue1IsPositive ? 1 : -1; + + // Compare individual parts. At this point, the two numbers have the same sign. + if(mPart1 == other.mPart1) + { + if(mPart0 == other.mPart0) + return 0; + else if(mPart0 > other.mPart0) + return 1; + // return -1; //Just fall through to the end. + } + else if(mPart1 > other.mPart1) + return 1; + return -1; +} + +inline bool int128_t::operator==(const int128_t& other) const +{ + return (mPart0 == other.mPart0) && // Check mPart0 first as this will likely yield faster execution. + (mPart1 == other.mPart1); +} + +inline bool int128_t::operator!=(const int128_t& other) const +{ + return (mPart0 != other.mPart0) || // Check mPart0 first as this will likely yield faster execution. + (mPart1 != other.mPart1); +} + +inline bool int128_t::operator>(const int128_t& other) const +{ + return (compare(other) > 0); +} + +inline bool int128_t::operator>=(const int128_t& other) const +{ + return (compare(other) >= 0); +} + +inline bool int128_t::operator<(const int128_t& other) const +{ + return (compare(other) < 0); +} + +inline bool int128_t::operator<=(const int128_t& other) const +{ + return (compare(other) <= 0); +} + +inline bool int128_t::IsNegative() const +{ // True if value < 0 + return ((mPart1 & UINT64_C(0x8000000000000000)) != 0); +} + +inline bool int128_t::IsPositive() const +{ // True of value >= 0 + return ((mPart1 & UINT64_C(0x8000000000000000)) == 0); +} + +inline int128_t int128_t::operator*(const int128_t& other) +{ + int128_t a(*this); + int128_t b(other); + int128_t returnValue; + + // Correctly handle negative values + bool bANegative(false); + bool bBNegative(false); + + if(a.IsNegative()) + { + bANegative = true; + a.Negate(); + } + + if(b.IsNegative()) + { + bBNegative = true; + b.Negate(); + } + + int128_t_base::OperatorMul(a, b, returnValue); + + // Do negation as needed. 
+ if(bANegative != bBNegative) + returnValue.Negate(); + + return returnValue; +} + +inline int128_t int128_t::operator/(const int128_t& other) +{ + int128_t remainder; + int128_t quotient; + this->Modulus(other, quotient, remainder); + return quotient; +} + +inline int128_t int128_t::operator<<(int nShift) const +{ + int128_t temp; + OperatorShiftLeft(*this, nShift, temp); + return temp; +} + +inline int128_t& int128_t::operator+=(const int128_t& value) +{ + OperatorPlus(*this, value, *this); + return *this; +} + +inline int128_t& int128_t::operator-=(const int128_t& value) +{ + OperatorMinus(*this, value, *this); + return *this; +} + +inline int128_t& int128_t::operator<<=(int nShift) +{ + int128_t temp; + OperatorShiftLeft(*this, nShift, temp); + *this = temp; + return *this; +} + +inline int128_t& int128_t::operator*=(const int128_t& value) +{ + *this = *this * value; + return *this; +} + +inline int128_t& int128_t::operator%=(const int128_t& value) +{ + *this = *this % value; + return *this; +} + +inline int128_t int128_t::operator%(const int128_t& other) +{ + int128_t remainder; + int128_t quotient; + this->Modulus(other, quotient, remainder); + return remainder; +} + +inline int128_t& int128_t::operator/=(const int128_t& value) +{ + *this = *this / value; + return *this; +} + +// With rightward shifts of negative numbers, shift in zero from the left side. +inline int128_t int128_t::operator>>(int nShift) const +{ + int128_t temp; + OperatorShiftRight(*this, nShift, temp); + return temp; +} + +inline int128_t& int128_t::operator>>=(int nShift) +{ + int128_t temp; + OperatorShiftRight(*this, nShift, temp); + *this = temp; + return *this; +} + +inline int128_t int128_t::operator^(const int128_t& other) const +{ + int128_t temp; + int128_t::OperatorXOR(*this, other, temp); + return temp; +} + +inline int128_t int128_t::operator|(const int128_t& other) const +{ + int128_t temp; + int128_t::OperatorOR(*this, other, temp); + return temp; +} + + +inline int128_t int128_t::operator&(const int128_t& other) const +{ + int128_t temp; + int128_t::OperatorAND(*this, other, temp); + return temp; +} + +inline int128_t& int128_t::operator^=(const int128_t& value) +{ + OperatorXOR(*this, value, *this); + return *this; +} + +inline int128_t& int128_t::operator|=(const int128_t& value) +{ + OperatorOR(*this, value, *this); + return *this; +} + +inline int128_t& int128_t::operator&=(const int128_t& value) +{ + OperatorAND(*this, value, *this); + return *this; +} + +EA_DISABLE_VC_WARNING(4723) // warning C4723: potential divide by 0 +inline void int128_t::Modulus(const int128_t& divisor, int128_t& quotient, int128_t& remainder) const +{ + int128_t tempDividend(*this); + int128_t tempDivisor(divisor); + + bool bDividendNegative = false; + bool bDivisorNegative = false; + + if(tempDividend.IsNegative()) + { + bDividendNegative = true; + tempDividend.Negate(); + } + if(tempDivisor.IsNegative()) + { + bDivisorNegative = true; + tempDivisor.Negate(); + } + + // Handle the special cases + if(tempDivisor.IsZero()) + { + // Force a divide by zero exception. + // We know that tempDivisor.mPart0 is zero. 
+		quotient.mPart0 /= tempDivisor.mPart0;
+	}
+	else if(tempDividend.IsZero())
+	{
+		quotient  = int128_t((uint32_t)0);
+		remainder = int128_t((uint32_t)0);
+	}
+	else
+	{
+		remainder.SetZero();
+
+		for(int i(0); i < 128; i++)
+		{
+			remainder += (uint32_t)tempDividend.GetBit(127 - i);
+			const bool bBit(remainder >= tempDivisor);
+			quotient.SetBit(127 - i, bBit);
+
+			if(bBit)
+				remainder -= tempDivisor;
+
+			if((i != 127) && !remainder.IsZero())
+				remainder <<= 1;
+		}
+	}
+
+	if((bDividendNegative && !bDivisorNegative) || (!bDividendNegative && bDivisorNegative))
+	{
+		// Ensure the following formula applies for negative dividends
+		// dividend = divisor * quotient + remainder
+		quotient.Negate();
+	}
+}
+EA_RESTORE_VC_WARNING()
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////
+// INT128_C / UINT128_C
+//
+// The C99 language defines macros for portably defining constants of
+// sized numeric types. For example, there might be:
+//     #define UINT64_C(x) x##ULL
+// Since our int128 data type is not a built-in type, we can't define a
+// UINT128_C macro as something that pastes ULLL at the end of the digits.
+// Instead we define it to create a temporary that is constructed from a
+// string of the digits. This will work in most cases that suffix pasting
+// would work.
+//
+/* EA_CONSTEXPR */ inline uint128_t UINT128_C(uint64_t nPart1, uint64_t nPart0) { return uint128_t(nPart0, nPart1); }
+/* EA_CONSTEXPR */ inline  int128_t  INT128_C(int64_t nPart1, int64_t nPart0)  { return int128_t(static_cast<uint64_t>(nPart0), static_cast<uint64_t>(nPart1)); }
+
+
+
+
+#endif // INCLUDED_int128_h
+
diff --git a/external/EASTL/include/EABase/nullptr.h b/external/EASTL/include/EABase/nullptr.h
new file mode 100644
index 00000000..d6629d50
--- /dev/null
+++ b/external/EASTL/include/EABase/nullptr.h
@@ -0,0 +1,102 @@
+/*-----------------------------------------------------------------------------
+ * nullptr.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+
+#include <EABase/eabase.h>
+#include <EABase/eahave.h>
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once /* Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. */
+#endif
+
+
+#if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_NO_NULLPTR) && !defined(EA_HAVE_nullptr_t_IMPL)
+	// The compiler supports nullptr, but the standard library doesn't implement a declaration for std::nullptr_t. So we provide one.
+	namespace std { typedef decltype(nullptr) nullptr_t; }
+#endif
+
+
+
+#if defined(EA_COMPILER_NO_NULLPTR) // If the compiler lacks a native version...
+
+	namespace std
+	{
+		class nullptr_t
+		{
+		public:
+			template <class T>          // When tested as a pointer, acts as 0.
+			operator T*() const
+				{ return 0; }
+
+			template <class C, class T> // When tested as a member pointer, acts as 0.
+			operator T C::*() const
+				{ return 0; }
+
+			typedef void* (nullptr_t::*bool_)() const;
+			operator bool_() const      // An rvalue of type std::nullptr_t can be converted to an rvalue of type bool; the resulting value is false.
+				{ return false; }       // We can't use operator bool(){ return false; } because bool is convertible to int, which breaks other required functionality.
+
+			// We can't enable this without generating warnings about nullptr being uninitialized after being used when created without "= {}".
+			//void* mSizeofVoidPtr;     // sizeof(nullptr_t) == sizeof(void*).
+			//                          Needs to be public if nullptr_t is to be a POD.
+
+		private:
+			void operator&() const;     // Address cannot be taken.
+		};
+
+		inline nullptr_t nullptr_get()
+		{
+			nullptr_t n = { }; // std::nullptr exists.
+			return n;
+		}
+
+		#if !defined(nullptr) // If somebody hasn't already defined nullptr in a custom way...
+			#define nullptr nullptr_get()
+		#endif
+
+	} // namespace std
+
+
+	template <class T>
+	inline bool operator==(T* p, const std::nullptr_t)
+		{ return p == 0; }
+
+	template <class T>
+	inline bool operator==(const std::nullptr_t, T* p)
+		{ return p == 0; }
+
+	template <class T, class U>
+	inline bool operator==(T U::* p, const std::nullptr_t)
+		{ return p == 0; }
+
+	template <class T, class U>
+	inline bool operator==(const std::nullptr_t, T U::* p)
+		{ return p == 0; }
+
+	inline bool operator==(const std::nullptr_t, const std::nullptr_t)
+		{ return true; }
+
+	inline bool operator!=(const std::nullptr_t, const std::nullptr_t)
+		{ return false; }
+
+	inline bool operator<(const std::nullptr_t, const std::nullptr_t)
+		{ return false; }
+
+	inline bool operator>(const std::nullptr_t, const std::nullptr_t)
+		{ return false; }
+
+	inline bool operator<=(const std::nullptr_t, const std::nullptr_t)
+		{ return true; }
+
+	inline bool operator>=(const std::nullptr_t, const std::nullptr_t)
+		{ return true; }
+
+
+	using std::nullptr_t;   // exported to global namespace.
+	using std::nullptr_get; // exported to global namespace.
+
+#endif // EA_COMPILER_NO_NULLPTR
+
diff --git a/external/EASTL/include/EABase/version.h b/external/EASTL/include/EABase/version.h
new file mode 100644
index 00000000..b6e1b665
--- /dev/null
+++ b/external/EASTL/include/EABase/version.h
@@ -0,0 +1,36 @@
+/*-----------------------------------------------------------------------------
+ * version.h
+ *
+ * Copyright (c) Electronic Arts Inc. All rights reserved.
+ *---------------------------------------------------------------------------*/
+
+#ifndef INCLUDED_EABASE_VERSION_H
+#define INCLUDED_EABASE_VERSION_H
+
+///////////////////////////////////////////////////////////////////////////////
+// EABASE_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction is that minor versions are defined as two-digit
+// entities (e.g. ".03") instead of minimal-digit entities (e.g. ".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+//     "0.91.00"  // Major version 0, minor version 91, patch version 0.
+//     "1.00.00"  // Major version 1, minor and patch version 0.
+//     "3.10.02"  // Major version 3, minor version 10, patch version 02.
+//     "12.03.01" // Major version 12, minor version 03, patch version 01.
+//
+// Example usage:
+//     printf("EABASE version: %s", EABASE_VERSION);
+//     printf("EABASE version: %d.%d.%d", EABASE_VERSION_N / 10000 % 100, EABASE_VERSION_N / 100 % 100, EABASE_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EABASE_VERSION
+	#define EABASE_VERSION   "2.09.12"
+	#define EABASE_VERSION_N  20912
+#endif
+
+#endif
diff --git a/external/EASTL/include/EASTL/algorithm.h b/external/EASTL/include/EASTL/algorithm.h
new file mode 100644
index 00000000..2101231b
--- /dev/null
+++ b/external/EASTL/include/EASTL/algorithm.h
@@ -0,0 +1,4416 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements some of the primary algorithms from the C++ STL
+// algorithm library. These versions are just like the STL versions and so
+// are redundant. They are provided solely for the purpose of projects that
+// either cannot use the standard C++ STL or want algorithms that have
+// guaranteed identical behaviour across platforms.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Definitions
+//
+// You will notice that we are very particular about the templated typenames
+// we use here, and that we follow the C++ standard closely in these
+// respects. Each of these typenames has a specific meaning; this is why we
+// don't label templated arguments with just letters such as T, U, V, A, B.
+// Here we provide a quick reference for the typenames we use. See the C++
+// standard, section 25-8 for more details.
+// --------------------------------------------------------------
+// typename              Meaning
+// --------------------------------------------------------------
+// T                     The value type.
+// Compare               A function which takes two arguments and returns the lesser of the two.
+// Predicate             A function which takes one argument and returns true if the argument meets some criteria.
+// BinaryPredicate       A function which takes two arguments and returns true if some criteria is met (e.g. they are equal).
+// StrictWeakOrdering    A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines.
+// Function              A function which takes one argument and applies some operation to the target.
+// Size                  A count or size.
+// Generator             A function which takes no arguments and returns a value (which will usually be assigned to an object).
+// UnaryOperation        A function which takes one argument and returns a value (which will usually be assigned to a second object).
+// BinaryOperation       A function which takes two arguments and returns a value (which will usually be assigned to a third object).
+// InputIterator         An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction.
+// ForwardIterator       An input iterator which is like InputIterator except it can be reset back to the beginning.
+// BidirectionalIterator An input iterator which is like ForwardIterator except it can be read in a backward direction as well.
+// RandomAccessIterator  An input iterator which can be addressed like an array. It is a superset of all other input iterators.
+// OutputIterator        An output iterator (iterator you write to) which allows writing each element only once and only in a forward direction.
+//
+// Note that a function which takes an InputIterator will also work with a
+// ForwardIterator, BidirectionalIterator, or RandomAccessIterator. The given
+// iterator type is merely the -minimum- functionality the iterator must
+// support.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Optimizations
+//
+// There are a number of opportunities for optimizations that we take here
+// in this library.
The most obvious kinds are those that subsitute memcpy +// in the place of a conventional loop for data types with which this is +// possible. The algorithms here are optimized to a higher level than currently +// available C++ STL algorithms from vendors such as Microsoft. This is especially +// so for game programming on console devices, as we do things such as reduce +// branching relative to other STL algorithm implementations. However, the +// proper implementation of these algorithm optimizations is a fairly tricky +// thing. +// +// The various things we look to take advantage of in order to implement +// optimizations include: +// - Taking advantage of random access iterators. +// - Taking advantage of trivially copyable data types (types for which it is safe to memcpy or memmove). +// - Taking advantage of type_traits in general. +// - Reducing branching and taking advantage of likely branch predictions. +// - Taking advantage of issues related to pointer and reference aliasing. +// - Improving cache coherency during memory accesses. +// - Making code more likely to be inlinable by the compiler. +// +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Supported Algorithms +// +// Algorithms that we implement are listed here. Note that these items are not +// all within this header file, as we split up the header files in order to +// improve compilation performance. Items marked with '+' are items that are +// extensions which don't exist in the C++ standard. +// +// ------------------------------------------------------------------------------- +// Algorithm Notes +// ------------------------------------------------------------------------------- +// adjacent_find +// adjacent_find +// all_of C++11 +// any_of C++11 +// none_of C++11 +// binary_search +// binary_search +// +binary_search_i +// +binary_search_i +// +change_heap Found in heap.h +// +change_heap Found in heap.h +// clamp +// copy +// copy_if C++11 +// copy_n C++11 +// copy_backward +// count +// count_if +// equal +// equal +// equal_range +// equal_range +// fill +// fill_n +// find +// find_end +// find_end +// find_first_of +// find_first_of +// +find_first_not_of +// +find_first_not_of +// +find_last_of +// +find_last_of +// +find_last_not_of +// +find_last_not_of +// find_if +// find_if_not +// for_each +// generate +// generate_n +// +identical +// +identical +// iter_swap +// lexicographical_compare +// lexicographical_compare +// lexicographical_compare_three_way +// lower_bound +// lower_bound +// make_heap Found in heap.h +// make_heap Found in heap.h +// min +// min +// max +// max +// +min_alt Exists to work around the problem of conflicts with min/max #defines on some systems. 
+// +min_alt +// +max_alt +// +max_alt +// +median +// +median +// merge Found in sort.h +// merge Found in sort.h +// min_element +// min_element +// max_element +// max_element +// mismatch +// mismatch +// move +// move_backward +// nth_element Found in sort.h +// nth_element Found in sort.h +// partial_sort Found in sort.h +// partial_sort Found in sort.h +// push_heap Found in heap.h +// push_heap Found in heap.h +// pop_heap Found in heap.h +// pop_heap Found in heap.h +// random_shuffle +// remove +// remove_if +// +apply_and_remove +// +apply_and_remove_if +// remove_copy +// remove_copy_if +// +remove_heap Found in heap.h +// +remove_heap Found in heap.h +// replace +// replace_if +// replace_copy +// replace_copy_if +// reverse_copy +// reverse +// random_shuffle +// rotate +// rotate_copy +// search +// search +// search_n +// set_difference +// set_difference +// set_difference_2 +// set_difference_2 +// set_decomposition +// set_decomposition +// set_intersection +// set_intersection +// set_symmetric_difference +// set_symmetric_difference +// set_union +// set_union +// sort Found in sort.h +// sort Found in sort.h +// sort_heap Found in heap.h +// sort_heap Found in heap.h +// stable_sort Found in sort.h +// stable_sort Found in sort.h +// partition Found in sort.h +// stable_partition Found in sort.h +// swap +// swap_ranges +// transform +// transform +// unique +// unique +// upper_bound +// upper_bound +// is_permutation +// is_permutation +// next_permutation +// next_permutation +// is_partitioned +// partition_point +// +// Algorithms from the C++ standard that we don't implement are listed here. +// Most of these items are absent because they aren't used very often. +// They also happen to be the more complicated than other algorithms. +// However, we can implement any of these functions for users that might +// need them. +// includes +// includes +// inplace_merge +// inplace_merge +// partial_sort_copy +// partial_sort_copy +// prev_permutation +// prev_permutation +// search_n +// unique_copy +// unique_copy +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ALGORITHM_H +#define EASTL_ALGORITHM_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS(); + + #if defined(EA_COMPILER_MSVC) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #include + #endif + + #include + #include // memcpy, memcmp, memmove + +EA_RESTORE_ALL_VC_WARNINGS(); + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// min/max workaround +// +// MSVC++ has #defines for min/max which collide with the min/max algorithm +// declarations. The following may still not completely resolve some kinds of +// problems with MSVC++ #defines, though it deals with most cases in production +// game code. +// +#if EASTL_NOMINMAX + #ifdef min + #undef min + #endif + #ifdef max + #undef max + #endif +#endif + + + + +namespace eastl +{ + /// min_element + /// + /// min_element finds the smallest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value smaller than *i. 
+ /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: !(*j < *i). + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator min_element(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator currentMin = first; + + while(++first != last) + { + if(*first < *currentMin) + currentMin = first; + } + return currentMin; + } + return first; + } + + + /// min_element + /// + /// min_element finds the smallest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value smaller than *i. + /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// conditions hold: compare(*j, *i) == false. + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator min_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + if(first != last) + { + ForwardIterator currentMin = first; + + while(++first != last) + { + if(compare(*first, *currentMin)) + currentMin = first; + } + return currentMin; + } + return first; + } + + + /// max_element + /// + /// max_element finds the largest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value greater than *i. + /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: !(*i < *j). + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator max_element(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator currentMax = first; + + while(++first != last) + { + if(*currentMax < *first) + currentMax = first; + } + return currentMax; + } + return first; + } + + + /// max_element + /// + /// max_element finds the largest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value greater than *i. + /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: compare(*i, *j) == false. + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. 
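+ ///
+ /// Example usage (a minimal illustrative sketch, using a lambda comparison):
+ ///     int values[] = { 3, 9, 2, 9, 5 };
+ ///     int* p = eastl::max_element(values, values + 5,
+ ///                                 [](int a, int b) { return a < b; });
+ ///     // p points to the first 9; equal maxima resolve to the first seen.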
+ /// + template + ForwardIterator max_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + if(first != last) + { + ForwardIterator currentMax = first; + + while(++first != last) + { + if(compare(*currentMax, *first)) + currentMax = first; + } + return currentMax; + } + return first; + } + + + #if EASTL_MINMAX_ENABLED + + /// min + /// + /// Min returns the lesser of its two arguments; it returns the first + /// argument if neither is less than the other. The two arguments are + /// compared with operator <. + /// + /// This min and our other min implementations are defined as returning: + /// b < a ? b : a + /// which for example may in practice result in something different than: + /// b <= a ? b : a + /// in the case where b is different from a (though they compare as equal). + /// We choose the specific ordering here because that's the ordering + /// done by other STL implementations. + /// + /// Some compilers (e.g. VS20003 - VS2013) generate poor code for the case of + /// scalars returned by reference, so we provide a specialization for those cases. + /// The specialization returns T by value instead of reference, which is + /// not that the Standard specifies. The Standard allows you to use + /// an expression like &max(x, y), which would be impossible in this case. + /// However, we have found no actual code that uses min or max like this and + /// this specialization causes no problems in practice. Microsoft has acknowledged + /// the problem and may fix it for a future VS version. + /// + template + inline EA_CONSTEXPR typename eastl::enable_if::value, T>::type + min(T a, T b) + { + return b < a ? b : a; + } + + template + inline EA_CONSTEXPR typename eastl::enable_if::value, const T&>::type + min(const T& a, const T& b) + { + return b < a ? b : a; + } + + inline EA_CONSTEXPR float min(float a, float b) { return b < a ? b : a; } + inline EA_CONSTEXPR double min(double a, double b) { return b < a ? b : a; } + inline EA_CONSTEXPR long double min(long double a, long double b) { return b < a ? b : a; } + + #endif // EASTL_MINMAX_ENABLED + + + /// min_alt + /// + /// This is an alternative version of min that avoids any possible + /// collisions with Microsoft #defines of min and max. + /// + /// See min(a, b) for detailed specifications. + /// + template + inline EA_CONSTEXPR typename eastl::enable_if::value, T>::type + min_alt(T a, T b) + { + return b < a ? b : a; + } + + template + inline typename eastl::enable_if::value, const T&>::type + min_alt(const T& a, const T& b) + { + return b < a ? b : a; + } + + inline EA_CONSTEXPR float min_alt(float a, float b) { return b < a ? b : a; } + inline EA_CONSTEXPR double min_alt(double a, double b) { return b < a ? b : a; } + inline EA_CONSTEXPR long double min_alt(long double a, long double b) { return b < a ? b : a; } + + + #if EASTL_MINMAX_ENABLED + + /// min + /// + /// Min returns the lesser of its two arguments; it returns the first + /// argument if neither is less than the other. The two arguments are + /// compared with the Compare function (or function object), which + /// takes two arguments and returns true if the first is less than + /// the second. + /// + /// See min(a, b) for detailed specifications. 
+ /// + /// Example usage: + /// struct A{ int a; }; + /// struct Struct{ bool operator()(const A& a1, const A& a2){ return a1.a < a2.a; } }; + /// + /// A a1, a2, a3; + /// a3 = min(a1, a2, Struct()); + /// + /// Example usage: + /// struct B{ int b; }; + /// inline bool Function(const B& b1, const B& b2){ return b1.b < b2.b; } + /// + /// B b1, b2, b3; + /// b3 = min(b1, b2, Function); + /// + template + inline const T& + min(const T& a, const T& b, Compare compare) + { + return compare(b, a) ? b : a; + } + + #endif // EASTL_MINMAX_ENABLED + + + /// min_alt + /// + /// This is an alternative version of min that avoids any possible + /// collisions with Microsoft #defines of min and max. + /// + /// See min(a, b) for detailed specifications. + /// + template + inline const T& + min_alt(const T& a, const T& b, Compare compare) + { + return compare(b, a) ? b : a; + } + + + #if EASTL_MINMAX_ENABLED + + /// max + /// + /// Max returns the greater of its two arguments; it returns the first + /// argument if neither is greater than the other. The two arguments are + /// compared with operator < (and not operator >). + /// + /// This min and our other min implementations are defined as returning: + /// a < b ? b : a + /// which for example may in practice result in something different than: + /// a <= b ? b : a + /// in the case where b is different from a (though they compare as equal). + /// We choose the specific ordering here because that's the ordering + /// done by other STL implementations. + /// + template + inline EA_CONSTEXPR typename eastl::enable_if::value, T>::type + max(T a, T b) + { + return a < b ? b : a; + } + + template + inline EA_CONSTEXPR typename eastl::enable_if::value, const T&>::type + max(const T& a, const T& b) + { + return a < b ? b : a; + } + + inline EA_CONSTEXPR float max(float a, float b) { return a < b ? b : a; } + inline EA_CONSTEXPR double max(double a, double b) { return a < b ? b : a; } + inline EA_CONSTEXPR long double max(long double a, long double b) { return a < b ? b : a; } + + #endif // EASTL_MINMAX_ENABLED + + + /// max_alt + /// + /// This is an alternative version of max that avoids any possible + /// collisions with Microsoft #defines of min and max. + /// + template + inline EA_CONSTEXPR typename eastl::enable_if::value, T>::type + max_alt(T a, T b) + { + return a < b ? b : a; + } + + template + inline EA_CONSTEXPR typename eastl::enable_if::value, const T&>::type + max_alt(const T& a, const T& b) + { + return a < b ? b : a; + } + + inline EA_CONSTEXPR float max_alt(float a, float b) { return a < b ? b : a; } + inline EA_CONSTEXPR double max_alt(double a, double b) { return a < b ? b : a; } + inline EA_CONSTEXPR long double max_alt(long double a, long double b) { return a < b ? b : a; } + + + #if EASTL_MINMAX_ENABLED + /// max + /// + /// Min returns the lesser of its two arguments; it returns the first + /// argument if neither is less than the other. The two arguments are + /// compared with the Compare function (or function object), which + /// takes two arguments and returns true if the first is less than + /// the second. + /// + template + inline const T& + max(const T& a, const T& b, Compare compare) + { + return compare(a, b) ? b : a; + } + #endif + + + /// max_alt + /// + /// This is an alternative version of max that avoids any possible + /// collisions with Microsoft #defines of min and max. + /// + template + inline const T& + max_alt(const T& a, const T& b, Compare compare) + { + return compare(a, b) ? 
b : a; + } + + + /// min(std::initializer_list) + /// + template + T min(std::initializer_list ilist) + { + return *eastl::min_element(ilist.begin(), ilist.end()); + } + + /// min(std::initializer_list, Compare) + /// + template + T min(std::initializer_list ilist, Compare compare) + { + return *eastl::min_element(ilist.begin(), ilist.end(), compare); + } + + + /// max(std::initializer_list) + /// + template + T max(std::initializer_list ilist) + { + return *eastl::max_element(ilist.begin(), ilist.end()); + } + + /// max(std::initializer_list, Compare) + /// + template + T max(std::initializer_list ilist, Compare compare) + { + return *eastl::max_element(ilist.begin(), ilist.end(), compare); + } + + + /// minmax_element + /// + /// Returns: make_pair(first, first) if [first, last) is empty, otherwise make_pair(m, M), + /// where m is the first iterator in [first,last) such that no iterator in the range + /// refers to a smaller element, and where M is the last iterator in [first,last) such + /// that no iterator in the range refers to a larger element. + /// + /// Complexity: At most max([(3/2)*(N - 1)], 0) applications of the corresponding predicate, + /// where N is distance(first, last). + /// + template + eastl::pair + minmax_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + eastl::pair result(first, first); + + if(!(first == last) && !(++first == last)) + { + if(compare(*first, *result.first)) + { + result.second = result.first; + result.first = first; + } + else + result.second = first; + + while(++first != last) + { + ForwardIterator i = first; + + if(++first == last) + { + if(compare(*i, *result.first)) + result.first = i; + else if(!compare(*i, *result.second)) + result.second = i; + break; + } + else + { + if(compare(*first, *i)) + { + if(compare(*first, *result.first)) + result.first = first; + + if(!compare(*i, *result.second)) + result.second = i; + } + else + { + if(compare(*i, *result.first)) + result.first = i; + + if(!compare(*first, *result.second)) + result.second = first; + } + } + } + } + + return result; + } + + + template + eastl::pair + minmax_element(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + return eastl::minmax_element(first, last, eastl::less()); + } + + + + /// minmax + /// + /// Requires: Type T shall be LessThanComparable. + /// Returns: pair(b, a) if b is smaller than a, and pair(a, b) otherwise. + /// Remarks: Returns pair(a, b) when the arguments are equivalent. + /// Complexity: Exactly one comparison. + /// + + // The following optimization is a problem because it changes the return value in a way that would break + // users unless they used auto (e.g. auto result = minmax(17, 33); ) + // + // template + // inline EA_CONSTEXPR typename eastl::enable_if::value, eastl::pair >::type + // minmax(T a, T b) + // { + // return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b); + // } + // + // template + // inline typename eastl::enable_if::value, eastl::pair >::type + // minmax(const T& a, const T& b) + // { + // return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b); + // } + + // It turns out that the following conforming definition of minmax generates a warning when used with VC++ up + // to at least VS2012. The VS2012 version of minmax is a broken and non-conforming definition, and we don't + // want to do that. 
We could do it for scalars alone, though we'd have to decide if we are going to do that + // for all compilers, because it changes the return value from a pair of references to a pair of values. + template + inline eastl::pair + minmax(const T& a, const T& b) + { + return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b); + } + + + template + eastl::pair + minmax(const T& a, const T& b, Compare compare) + { + return compare(b, a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b); + } + + + + template + eastl::pair + minmax(std::initializer_list ilist) + { + typedef typename std::initializer_list::iterator iterator_type; + eastl::pair iteratorPair = eastl::minmax_element(ilist.begin(), ilist.end()); + return eastl::make_pair(*iteratorPair.first, *iteratorPair.second); + } + + template + eastl::pair + minmax(std::initializer_list ilist, Compare compare) + { + typedef typename std::initializer_list::iterator iterator_type; + eastl::pair iteratorPair = eastl::minmax_element(ilist.begin(), ilist.end(), compare); + return eastl::make_pair(*iteratorPair.first, *iteratorPair.second); + } + + template + inline T&& median_impl(T&& a, T&& b, T&& c) + { + if(a < b) + { + if(b < c) + return eastl::forward(b); + else if(a < c) + return eastl::forward(c); + else + return eastl::forward(a); + } + else if(a < c) + return eastl::forward(a); + else if(b < c) + return eastl::forward(c); + return eastl::forward(b); + } + + /// median + /// + /// median finds which element of three (a, b, d) is in-between the other two. + /// If two or more elements are equal, the first (e.g. a before b) is chosen. + /// + /// Complexity: Either two or three comparisons will be required, depending + /// on the values. + /// + template + inline const T& median(const T& a, const T& b, const T& c) + { + return median_impl(a, b, c); + } + + /// median + /// + /// median finds which element of three (a, b, d) is in-between the other two. + /// If two or more elements are equal, the first (e.g. a before b) is chosen. + /// + /// Complexity: Either two or three comparisons will be required, depending + /// on the values. + /// + template + inline T&& median(T&& a, T&& b, T&& c) + { + return eastl::forward(median_impl(eastl::forward(a), eastl::forward(b), eastl::forward(c))); + } + + + template + inline T&& median_impl(T&& a, T&& b, T&& c, Compare compare) + { + if(compare(a, b)) + { + if(compare(b, c)) + return eastl::forward(b); + else if(compare(a, c)) + return eastl::forward(c); + else + return eastl::forward(a); + } + else if(compare(a, c)) + return eastl::forward(a); + else if(compare(b, c)) + return eastl::forward(c); + return eastl::forward(b); + } + + + /// median + /// + /// median finds which element of three (a, b, d) is in-between the other two. + /// If two or more elements are equal, the first (e.g. a before b) is chosen. + /// + /// Complexity: Either two or three comparisons will be required, depending + /// on the values. + /// + template + inline const T& median(const T& a, const T& b, const T& c, Compare compare) + { + return median_impl(a, b, c, compare); + } + + /// median + /// + /// median finds which element of three (a, b, d) is in-between the other two. + /// If two or more elements are equal, the first (e.g. a before b) is chosen. + /// + /// Complexity: Either two or three comparisons will be required, depending + /// on the values. 
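+ ///
+ /// Example usage (a minimal illustrative sketch, using a lambda comparison):
+ ///     int m = eastl::median(3, 9, 5, [](int a, int b) { return a < b; });
+ ///     // m == 5, the value that lies between the other two.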
+ /// + template + inline T&& median(T&& a, T&& b, T&& c, Compare compare) + { + return eastl::forward(median_impl(eastl::forward(a), eastl::forward(b), eastl::forward(c), compare)); + } + + + + + /// all_of + /// + /// Returns: true if the unary predicate p returns true for all elements in the range [first, last) + /// + template + inline bool all_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(!p(*first)) + return false; + } + return true; + } + + + /// any_of + /// + /// Returns: true if the unary predicate p returns true for any of the elements in the range [first, last) + /// + template + inline bool any_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(p(*first)) + return true; + } + return false; + } + + + /// none_of + /// + /// Returns: true if the unary predicate p returns true for none of the elements in the range [first, last) + /// + template + inline bool none_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(p(*first)) + return false; + } + return true; + } + + + /// adjacent_find + /// + /// Returns: The first iterator i such that both i and i + 1 are in the range + /// [first, last) for which the following corresponding conditions hold: *i == *(i + 1). + /// Returns last if no such iterator is found. + /// + /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate. + /// + template + inline ForwardIterator + adjacent_find(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator i = first; + + for(++i; i != last; ++i) + { + if(*first == *i) + return first; + first = i; + } + } + return last; + } + + + + /// adjacent_find + /// + /// Returns: The first iterator i such that both i and i + 1 are in the range + /// [first, last) for which the following corresponding conditions hold: predicate(*i, *(i + 1)) != false. + /// Returns last if no such iterator is found. + /// + /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate. + /// + template + inline ForwardIterator + adjacent_find(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate) + { + if(first != last) + { + ForwardIterator i = first; + + for(++i; i != last; ++i) + { + if(predicate(*first, *i)) + return first; + first = i; + } + } + return last; + } + + + /// shuffle + /// + /// New for C++11 + /// Randomizes a sequence of values via a user-supplied UniformRandomNumberGenerator. + /// The difference between this and the original random_shuffle function is that this uses the more + /// advanced and flexible UniformRandomNumberGenerator interface as opposed to the more + /// limited RandomNumberGenerator interface of random_shuffle. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow. + /// Rand randInstance; + /// shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + // See the C++11 Standard, 26.5.1.3, Uniform random number generator requirements. 
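+ // Note that the Rand functor in the example above models the older
+ // RandomNumberGenerator interface used by random_shuffle; shuffle feeds its
+ // generator through uniform_int_distribution, so the generator must satisfy
+ // the C++11 engine interface. A minimal sketch, assuming a standard engine
+ // is acceptable in the calling code ('someSeed' is hypothetical):
+ //     #include <random>
+ //     std::default_random_engine engine(someSeed);
+ //     eastl::shuffle(pArrayBegin, pArrayEnd, engine);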
+ // Also http://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution + + template + void shuffle(RandomAccessIterator first, RandomAccessIterator last, UniformRandomNumberGenerator&& urng) + { + if(first != last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::make_unsigned::type unsigned_difference_type; + typedef typename eastl::uniform_int_distribution uniform_int_distribution; + typedef typename uniform_int_distribution::param_type uniform_int_distribution_param_type; + + uniform_int_distribution uid; + + for(RandomAccessIterator i = first + 1; i != last; ++i) + iter_swap(i, first + uid(urng, uniform_int_distribution_param_type(0, i - first))); + } + } + + + /// random_shuffle + /// + /// Randomizes a sequence of values. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// eastl_size_t Rand(eastl_size_t n) { return (eastl_size_t)(rand() % n); } // Note: The C rand function is poor and slow. + /// pointer_to_unary_function randInstance(Rand); + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + /// Example usage: + /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow. + /// Rand randInstance; + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + template + inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator&& rng) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + // We must do 'rand((i - first) + 1)' here and cannot do 'rand(last - first)', + // as it turns out that the latter results in unequal distribution probabilities. + // http://www.cigital.com/papers/download/developer_gambling.php + + const difference_type swapMax = eastl::distance(first, last); + + // deliberately start at 1. + for (difference_type swapIter = 1; swapIter < swapMax; ++swapIter) + { + RandomAccessIterator i = first + swapIter; + iter_swap(i, first + (difference_type)rng((eastl_size_t)((i - first) + 1))); + } + } + + + /// random_shuffle + /// + /// Randomizes a sequence of values. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// random_shuffle(pArrayBegin, pArrayEnd); + /// + /// *** Disabled until we decide if we want to get into the business of writing random number generators. *** + /// + /// template + /// inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last) + /// { + /// for(RandomAccessIterator i = first + 1; i < last; ++i) + /// iter_swap(i, first + SomeRangedRandomNumberGenerator((i - first) + 1)); + /// } + + + + + + + /// move_n + /// + /// Same as move(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range. 
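+ /// Example usage (a minimal illustrative sketch, assuming 'srcBuffer' and
+ /// 'dstBuffer' are int arrays of at least 16 elements):
+ ///     eastl::move_n(srcBuffer, 16, dstBuffer); // moves exactly 16 elements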
+ /// + template + inline OutputIterator + move_n_impl(InputIterator first, Size n, OutputIterator result, EASTL_ITC_NS::input_iterator_tag) + { + for(; n > 0; --n) + *result++ = eastl::move(*first++); + return result; + } + + template + inline OutputIterator + move_n_impl(RandomAccessIterator first, Size n, OutputIterator result, EASTL_ITC_NS::random_access_iterator_tag) + { + return eastl::move(first, first + n, result); // Take advantage of the optimizations present in the move algorithm. + } + + + template + inline OutputIterator + move_n(InputIterator first, Size n, OutputIterator result) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return eastl::move_n_impl(first, n, result, IC()); + } + + + + /// copy_n + /// + /// Same as copy(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range. + /// Effects: Copies exactly count values from the range beginning at first to the range beginning at result, if count > 0. Does nothing otherwise. + /// Returns: Iterator in the destination range, pointing past the last element copied if count>0 or first otherwise. + /// Complexity: Exactly count assignments, if count > 0. + /// + template + inline OutputIterator + copy_n_impl(InputIterator first, Size n, OutputIterator result, EASTL_ITC_NS::input_iterator_tag) + { + for(; n > 0; --n) + *result++ = *first++; + return result; + } + + template + inline OutputIterator + copy_n_impl(RandomAccessIterator first, Size n, OutputIterator result, EASTL_ITC_NS::random_access_iterator_tag) + { + return eastl::copy(first, first + n, result); // Take advantage of the optimizations present in the copy algorithm. + } + + + template + inline OutputIterator + copy_n(InputIterator first, Size n, OutputIterator result) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return eastl::copy_n_impl(first, n, result, IC()); + } + + + /// copy_if + /// + /// Effects: Assigns to the result iterator only if the predicate is true. + /// + template + inline OutputIterator + copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate) + { + // This implementation's performance could be improved by taking a more complicated approach like with the copy algorithm. + for(; first != last; ++first) + { + if(predicate(*first)) + *result++ = *first; + } + + return result; + } + + + + + // Implementation moving copying both trivial and non-trivial data via a lesser iterator than random-access. + template + struct move_and_copy_backward_helper + { + template + static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + while(first != last) + *--resultEnd = *--last; + return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end. + } + }; + + // Specialization for moving non-trivial data via a lesser iterator than random-access. + template + struct move_and_copy_backward_helper + { + template + static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + while(first != last) + *--resultEnd = eastl::move(*--last); + return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end. + } + }; + + // Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const. 
+ template<> + struct move_and_copy_backward_helper + { + template + static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + for(difference_type n = (last - first); n > 0; --n) + *--resultEnd = eastl::move(*--last); + return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end. + } + }; + + // Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const. + // This specialization converts the random access BidirectionalIterator1 last-first to an integral type. There's simple way for us to take advantage of a random access output iterator, + // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow. + template <> + struct move_and_copy_backward_helper + { + template + static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + for(difference_type n = (last - first); n > 0; --n) + *--resultEnd = *--last; + return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end. + } + }; + + // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this. + template + struct move_and_copy_backward_helper + { + template + static T* move_or_copy_backward(const T* first, const T* last, T* resultEnd) + { + const size_t n = (size_t)((uintptr_t)last - (uintptr_t)first); + // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove. + if (n > 0) + return (T*)memmove(resultEnd - (last - first), first, n); + else + return resultEnd; + } + }; + + template + inline BidirectionalIterator2 move_and_copy_backward_chooser(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + typedef typename eastl::iterator_traits::iterator_category IIC; + + const bool canBeMemmoved = internal::can_be_memmoved_helper::value; + + return eastl::move_and_copy_backward_helper::move_or_copy_backward(first, last, resultEnd); // Need to chose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self. + } + + + template + EASTL_REMOVE_AT_2024_SEPT inline BidirectionalIterator2 move_and_copy_backward_unwrapper(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'unwrap_iterator': was declared deprecated + return BidirectionalIterator2(eastl::move_and_copy_backward_chooser(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(resultEnd))); // Have to convert to BidirectionalIterator2 because result.base() could be a T* + EASTL_INTERNAL_RESTORE_DEPRECATED() + } + + + /// move_backward + /// + /// The elements are moved in reverse order (the last element is moved first), but their relative order is preserved. + /// After this operation the elements in the moved-from range will still contain valid values of the + /// appropriate type, but not necessarily the same values as before the move. 
+ /// Returns the beginning of the result range. + /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers. + /// Note: If result is within [first, last), move must be used instead of move_backward. + /// + /// Example usage: + /// eastl::move_backward(myArray.begin(), myArray.end(), myDestArray.end()); + /// + /// Reference implementation: + /// template + /// BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + /// { + /// while(last != first) + /// *--resultEnd = eastl::move(*--last); + /// return resultEnd; + /// } + /// + template + inline BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + return eastl::move_and_copy_backward_chooser(first, last, resultEnd); + } + + + /// copy_backward + /// + /// copies memory in the range of [first, last) to the range *ending* with result. + /// + /// Effects: Copies elements in the range [first, last) into the range + /// [result - (last - first), result) starting from last 1 and proceeding to first. + /// For each positive integer n <= (last - first), performs *(result n) = *(last - n). + /// + /// Requires: result shall not be in the range [first, last). + /// + /// Returns: result - (last - first). That is, returns the beginning of the result range. + /// + /// Complexity: Exactly 'last - first' assignments. + /// + template + inline BidirectionalIterator2 copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + return eastl::move_and_copy_backward_chooser(first, last, resultEnd); + } + + + /// count + /// + /// Counts the number of items in the range of [first, last) which equal the input value. + /// + /// Effects: Returns the number of iterators i in the range [first, last) for which the + /// following corresponding conditions hold: *i == value. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of count is count_if and not another variation of count. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline typename eastl::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const T& value) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(*first == value) + ++result; + } + return result; + } + + + // C++ doesn't define a count with predicate, as it can effectively be synthesized via count_if + // with an appropriate predicate. However, it's often simpler to just have count with a predicate. + template + inline typename eastl::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const T& value, Predicate predicate) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(predicate(*first, value)) + ++result; + } + return result; + } + + + /// count_if + /// + /// Counts the number of items in the range of [first, last) which match + /// the input value as defined by the input predicate function. + /// + /// Effects: Returns the number of iterators i in the range [first, last) for which the + /// following corresponding conditions hold: predicate(*i) != false. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. 
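+ ///
+ /// Example usage (a minimal illustrative sketch, assuming an int array
+ /// 'values' of at least 8 elements):
+ ///     ptrdiff_t n = eastl::count_if(values, values + 8,
+ ///                                   [](int x) { return (x % 2) == 0; });
+ ///     // n is the number of even values in the range.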
+ /// + /// Note: The non-predicate version of count_if is count and not another variation of count_if. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline typename eastl::iterator_traits::difference_type + count_if(InputIterator first, InputIterator last, Predicate predicate) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(predicate(*first)) + ++result; + } + return result; + } + + + /// find + /// + /// finds the value within the unsorted range of [first, last). + /// + /// Returns: The first iterator i in the range [first, last) for which + /// the following corresponding conditions hold: *i == value. + /// Returns last if no such iterator is found. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// This is a linear search and not a binary one. + /// + /// Note: The predicate version of find is find_if and not another variation of find. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline InputIterator + find(InputIterator first, InputIterator last, const T& value) + { + while((first != last) && !(*first == value)) // Note that we always express value comparisons in terms of < or ==. + ++first; + return first; + } + + + // C++ doesn't define a find with predicate, as it can effectively be synthesized via find_if + // with an appropriate predicate. However, it's often simpler to just have find with a predicate. + template + inline InputIterator + find(InputIterator first, InputIterator last, const T& value, Predicate predicate) + { + while((first != last) && !predicate(*first, value)) + ++first; + return first; + } + + + + /// find_if + /// + /// finds the value within the unsorted range of [first, last). + /// + /// Returns: The first iterator i in the range [first, last) for which + /// the following corresponding conditions hold: pred(*i) != false. + /// Returns last if no such iterator is found. + /// If the sequence of elements to search for (i.e. first2 - last2) is empty, + /// the find always fails and last1 will be returned. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// + /// Note: The non-predicate version of find_if is find and not another variation of find_if. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline InputIterator + find_if(InputIterator first, InputIterator last, Predicate predicate) + { + while((first != last) && !predicate(*first)) + ++first; + return first; + } + + + + /// find_if_not + /// + /// find_if_not works the same as find_if except it tests for if the predicate + /// returns false for the elements instead of true. + /// + template + inline InputIterator + find_if_not(InputIterator first, InputIterator last, Predicate predicate) + { + for(; first != last; ++first) + { + if(!predicate(*first)) + return first; + } + return last; + } + + + + + /// find_first_of + /// + /// find_first_of is similar to find in that it performs linear search through + /// a range of ForwardIterators. The difference is that while find searches + /// for one particular value, find_first_of searches for any of several values. + /// Specifically, find_first_of searches for the first occurrance in the + /// range [first1, last1) of any of the elements in [first2, last2). 
+ /// This function is thus similar to the strpbrk standard C string function. + /// If the sequence of elements to search for (i.e. first2-last2) is empty, + /// the find always fails and last1 will be returned. + /// + /// Effects: Finds an element that matches one of a set of values. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: *i == *j. + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + for(; first1 != last1; ++first1) + { + for(ForwardIterator2 i = first2; i != last2; ++i) + { + if(*first1 == *i) + return first1; + } + } + return last1; + } + + + /// find_first_of + /// + /// find_first_of is similar to find in that it performs linear search through + /// a range of ForwardIterators. The difference is that while find searches + /// for one particular value, find_first_of searches for any of several values. + /// Specifically, find_first_of searches for the first occurrance in the + /// range [first1, last1) of any of the elements in [first2, last2). + /// This function is thus similar to the strpbrk standard C string function. + /// + /// Effects: Finds an element that matches one of a set of values. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) != false. + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + for(; first1 != last1; ++first1) + { + for(ForwardIterator2 i = first2; i != last2; ++i) + { + if(predicate(*first1, *i)) + return first1; + } + } + return last1; + } + + + /// find_first_not_of + /// + /// Searches through first range for the first element that does not belong the second input range. + /// This is very much like the C++ string find_first_not_of function. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: !(*i == *j). + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + for(; first1 != last1; ++first1) + { + if(eastl::find(first2, last2, *first1) == last2) + break; + } + + return first1; + } + + + + /// find_first_not_of + /// + /// Searches through first range for the first element that does not belong the second input range. + /// This is very much like the C++ string find_first_not_of function. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) == false. + /// Returns last1 if no such iterator is found. 
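+ ///
+ /// Example usage (a minimal illustrative sketch; mirrors the basic_string
+ /// find_first_not_of behaviour):
+ ///     char src[] = "  \t hello";
+ ///     char ws[]  = " \t";
+ ///     char* p = eastl::find_first_not_of(src, src + 9, ws, ws + 2,
+ ///                   [](char a, char b) { return a == b; });
+ ///     // p points at 'h', the first character not in the ws set.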
+ /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + inline ForwardIterator1 + find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + for(; first1 != last1; ++first1) + { + if(eastl::find_if(first2, last2, [&predicate, first1](value_type& rhs) { return predicate(*first1, rhs); }) == last2) + break; + } + + return first1; + } + + + template + inline BidirectionalIterator1 + find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find(first2, last2, *it1) == last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find(first2, last2, *it1) != last2)) + return it1; + } + + return last1; + } + + + template + BidirectionalIterator1 + find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find_if(first2, last2, [&predicate, it1](value_type& rhs) { return predicate(*it1, rhs); }) == last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find_if(first2, last2, [&predicate, it1](value_type& rhs) { return predicate(*it1, rhs); }) != last2)) + return it1; + } + + return last1; + } + + + template + inline BidirectionalIterator1 + find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find(first2, last2, *it1) != last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find( first2, last2, *it1) == last2)) + return it1; + } + + return last1; + } + + + template + inline BidirectionalIterator1 + find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find_if(first2, last2, [&predicate, it1](value_type& rhs) { return predicate(*it1, rhs); }) != last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find_if(first2, last2, [&predicate, it1](value_type& rhs) { return predicate(*it1, rhs); })) != last2) + return it1; + } + + return last1; + } + + + + + /// for_each + /// + /// Calls the Function function for each value in the range [first, last). + /// Function takes a single parameter: the current value. + /// + /// Effects: Applies function to the result of dereferencing every iterator in + /// the range [first, last), starting from first and proceeding to last 1. + /// + /// Returns: function. + /// + /// Complexity: Applies function exactly 'last - first' times. + /// + /// Note: If function returns a result, the result is ignored. 
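+ ///
+ /// Example usage (a minimal illustrative sketch, assuming an int array
+ /// 'values' of at least 4 elements):
+ ///     eastl::for_each(values, values + 4, [](int& x) { x *= 2; }); // doubles each element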
+ /// + template + inline Function + for_each(InputIterator first, InputIterator last, Function function) + { + for(; first != last; ++first) + function(*first); + return function; + } + + /// for_each_n + /// + /// Calls the Function function for each value in the range [first, first + n). + /// Function takes a single parameter: the current value. + /// + /// Effects: Applies function to the result of dereferencing every iterator in + /// the range [first, first + n), starting from first and proceeding to last 1. + /// + /// Returns: first + n. + /// + /// Complexity: Applies function exactly 'first + n' times. + /// + /// Note: + //// * If function returns a result, the result is ignored. + //// * If n < 0, behaviour is undefined. + /// + template + EA_CPP14_CONSTEXPR inline InputIterator + for_each_n(InputIterator first, Size n, Function function) + { + for (Size i = 0; i < n; ++first, i++) + function(*first); + return first; + } + + + /// generate + /// + /// Iterates the range of [first, last) and assigns to each element the + /// result of the function generator. Generator is a function which takes + /// no arguments. + /// + /// Complexity: Exactly 'last - first' invocations of generator and assignments. + /// + template + inline void + generate(ForwardIterator first, ForwardIterator last, Generator generator) + { + for(; first != last; ++first) // We cannot call generate_n(first, last-first, generator) + *first = generator(); // because the 'last-first' might not be supported by the + } // given iterator. + + + /// generate_n + /// + /// Iterates an interator n times and assigns the result of generator + /// to each succeeding element. Generator is a function which takes + /// no arguments. + /// + /// Complexity: Exactly n invocations of generator and assignments. + /// + template + inline OutputIterator + generate_n(OutputIterator first, Size n, Generator generator) + { + for(; n > 0; --n, ++first) + *first = generator(); + return first; + } + + + /// transform + /// + /// Iterates the input range of [first, last) and the output iterator result + /// and assigns the result of unaryOperation(input) to result. + /// + /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1)) + /// a new corresponding value equal to unaryOperation(*(first1 + (i - result)). + /// + /// Requires: op shall not have any side effects. + /// + /// Returns: result + (last1 - first1). That is, returns the end of the output range. + /// + /// Complexity: Exactly 'last1 - first1' applications of unaryOperation. + /// + /// Note: result may be equal to first. + /// + template + inline OutputIterator + transform(InputIterator first, InputIterator last, OutputIterator result, UnaryOperation unaryOperation) + { + for(; first != last; ++first, ++result) + *result = unaryOperation(*first); + return result; + } + + + /// transform + /// + /// Iterates the input range of [first, last) and the output iterator result + /// and assigns the result of binaryOperation(input1, input2) to result. + /// + /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1)) + /// a new corresponding value equal to binaryOperation(*(first1 + (i - result), *(first2 + (i - result))). + /// + /// Requires: binaryOperation shall not have any side effects. + /// + /// Returns: result + (last1 - first1). That is, returns the end of the output range. + /// + /// Complexity: Exactly 'last1 - first1' applications of binaryOperation. 
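+ ///
+ /// Example usage (a minimal illustrative sketch, assuming int arrays 'a',
+ /// 'b' and 'sums', each of at least 4 elements):
+ ///     eastl::transform(a, a + 4, b, sums,
+ ///                      [](int x, int y) { return x + y; }); // sums[i] = a[i] + b[i]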
+
+ /// transform
+ ///
+ /// Iterates the input range of [first, last) and the output iterator result
+ /// and assigns the result of binaryOperation(input1, input2) to result.
+ ///
+ /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1))
+ /// a new corresponding value equal to binaryOperation(*(first1 + (i - result)), *(first2 + (i - result))).
+ ///
+ /// Requires: binaryOperation shall not have any side effects.
+ ///
+ /// Returns: result + (last1 - first1). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last1 - first1' applications of binaryOperation.
+ ///
+ /// Note: result may be equal to first1 or first2.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename BinaryOperation>
+ inline OutputIterator
+ transform(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, OutputIterator result, BinaryOperation binaryOperation)
+ {
+     for(; first1 != last1; ++first1, ++first2, ++result)
+         *result = binaryOperation(*first1, *first2);
+     return result;
+ }
+
+
+ /// equal
+ ///
+ /// Returns: true if for every iterator i in the range [first1, last1) the
+ /// following corresponding condition holds: *i == *(first2 + (i - first1)).
+ /// Otherwise, returns false.
+ ///
+ /// Complexity: At most 'last1 - first1' applications of the corresponding comparison.
+ ///
+ /// To consider: Make specializations of this for scalar types and random access
+ /// iterators that use memcmp or some trick memory comparison function.
+ /// We should verify that such a thing results in an improvement.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ EA_CPP14_CONSTEXPR inline bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
+ {
+     for(; first1 != last1; ++first1, ++first2)
+     {
+         if(!(*first1 == *first2)) // Note that we always express value comparisons in terms of < or ==.
+             return false;
+     }
+     return true;
+ }
+
+ /* Enable the following if it is shown to be of some benefit. A glance at Microsoft VC++'s memcmp
+    shows that it is not optimized in any way, much less in one that would benefit us here.
+
+ inline bool equal(const bool* first1, const bool* last1, const bool* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const char* first1, const char* last1, const char* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const signed char* first1, const signed char* last1, const signed char* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ #ifndef EA_WCHAR_T_NON_NATIVE
+     inline bool equal(const wchar_t* first1, const wchar_t* last1, const wchar_t* first2)
+         { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+ #endif
+
+ inline bool equal(const int16_t* first1, const int16_t* last1, const int16_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint16_t* first1, const uint16_t* last1, const uint16_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const int32_t* first1, const int32_t* last1, const int32_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint32_t* first1, const uint32_t* last1, const uint32_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const int64_t* first1, const int64_t* last1, const int64_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+
+ inline bool equal(const uint64_t* first1, const uint64_t* last1, const uint64_t* first2)
+     { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); }
+ */
+
+
+ /// equal
+ ///
+ /// Returns: true if for every iterator i in the range [first1, last1) the
+ /// following corresponding condition holds: predicate(*i, *(first2 + (i - first1))) != false.
+ /// Otherwise, returns false.
+ ///
+ /// Complexity: At most 'last1 - first1' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+ inline bool
+ equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate predicate)
+ {
+     for(; first1 != last1; ++first1, ++first2)
+     {
+         if(!predicate(*first1, *first2))
+             return false;
+     }
+     return true;
+ }
+
+
+
+ /// identical
+ ///
+ /// Returns true if the two input ranges are equivalent.
+ /// There is a subtle difference between this algorithm and
+ /// the 'equal' algorithm. The equal algorithm assumes the
+ /// two ranges are of equal length. This algorithm efficiently
+ /// compares two ranges for both length equality and for
+ /// element equality. There is no other standard algorithm
+ /// that can do this.
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is of the same length as the sequence of
+ /// elements defined by the range of [first2, last2) and if
+ /// the elements in these ranges are equal as per the
+ /// equal algorithm.
+ ///
+ /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ bool identical(InputIterator1 first1, InputIterator1 last1,
+                InputIterator2 first2, InputIterator2 last2)
+ {
+     while((first1 != last1) && (first2 != last2) && (*first1 == *first2))
+     {
+         ++first1;
+         ++first2;
+     }
+     return (first1 == last1) && (first2 == last2);
+ }
+
+
+ /// identical
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+ bool identical(InputIterator1 first1, InputIterator1 last1,
+                InputIterator2 first2, InputIterator2 last2, BinaryPredicate predicate)
+ {
+     while((first1 != last1) && (first2 != last2) && predicate(*first1, *first2))
+     {
+         ++first1;
+         ++first2;
+     }
+     return (first1 == last1) && (first2 == last2);
+ }
+
+
+
+ /// lexicographical_compare
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is lexicographically less than the sequence of
+ /// elements defined by the range [first2, last2). Returns false otherwise.
+ ///
+ /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ inline bool
+ lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
+ {
+     for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+     {
+         if(*first1 < *first2)
+             return true;
+         if(*first2 < *first1)
+             return false;
+     }
+     return (first1 == last1) && (first2 != last2);
+ }
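+
+ /// Example usage contrasting equal, identical and lexicographical_compare
+ /// (illustrative sketch): identical is the only one of the three that also
+ /// checks that the ranges have the same length.
+ ///     const int a[] = { 1, 2, 3 };
+ ///     const int b[] = { 1, 2, 3, 4 };
+ ///     bool e = eastl::equal(a, a + 3, b);                          // true: compares only 3 elements
+ ///     bool i = eastl::identical(a, a + 3, b, b + 4);               // false: lengths differ
+ ///     bool l = eastl::lexicographical_compare(a, a + 3, b, b + 4); // true: 'a' is a prefix of 'b'
+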
+ inline bool // Specialization for const char*.
+ lexicographical_compare(const char* first1, const char* last1, const char* first2, const char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for char*.
+ lexicographical_compare(char* first1, char* last1, char* first2, char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const unsigned char*.
+ lexicographical_compare(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2, const unsigned char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for unsigned char*.
+ lexicographical_compare(unsigned char* first1, unsigned char* last1, unsigned char* first2, unsigned char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const signed char*.
+ lexicographical_compare(const signed char* first1, const signed char* last1, const signed char* first2, const signed char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for signed char*.
+ lexicographical_compare(signed char* first1, signed char* last1, signed char* first2, signed char* last2)
+ {
+     const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     const size_t n = (size_t)eastl::min_alt(n1, n2);
+     if(n == 0)           // Don't call memcmp with n == 0;
+         return n1 < n2;  // an empty range is less than any non-empty range.
+     const int result = memcmp(first1, first2, n);
+     return result ? (result < 0) : (n1 < n2);
+ }
+
+ #if defined(_MSC_VER) // If using the VC++ compiler (and thus bool is known to be a single byte)...
+     // Not sure if this is a good idea.
+     //inline bool // Specialization for const bool*.
+     //lexicographical_compare(const bool* first1, const bool* last1, const bool* first2, const bool* last2)
+     //{
+     //    const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     //    const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+     //    return result ? (result < 0) : (n1 < n2);
+     //}
+     //
+     //inline bool // Specialization for bool*.
+     //lexicographical_compare(bool* first1, bool* last1, bool* first2, bool* last2)
+     //{
+     //    const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+     //    const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+     //    return result ? (result < 0) : (n1 < n2);
+     //}
+ #endif
+
+
+
+ /// lexicographical_compare
+ ///
+ /// Returns: true if the sequence of elements defined by the range
+ /// [first1, last1) is lexicographically less than the sequence of
+ /// elements defined by the range [first2, last2). Returns false otherwise.
+ ///
+ /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
+ /// of the corresponding comparison.
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ /// Note: False is always returned if range 1 is exhausted before range 2.
+ /// The result of this is that you can't do a successful reverse compare
+ /// (e.g. use greater<> as the comparison instead of less<>) unless the
+ /// two sequences are of identical length. What you want to do is reverse
+ /// the order of the arguments in order to get the desired effect.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename Compare>
+ inline bool
+ lexicographical_compare(InputIterator1 first1, InputIterator1 last1,
+                         InputIterator2 first2, InputIterator2 last2, Compare compare)
+ {
+     for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+     {
+         if(compare(*first1, *first2))
+             return true;
+         if(compare(*first2, *first1))
+             return false;
+     }
+     return (first1 == last1) && (first2 != last2);
+ }
+
+
+ #if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+
+ /// lexicographical_compare_three_way
+ ///
+ /// Returns: The comparison category ordering between both ranges. For the first non-equivalent pair in the ranges,
+ /// the comparison will be returned. Else if the first range is a subset (superset) of the second range, then the
+ /// less (greater) ordering will be returned.
+ ///
+ /// Complexity: At most N applications of the corresponding comparison,
+ /// where N = min(last1 - first1, last2 - first2).
+ ///
+ /// Note: If two sequences have the same number of elements and their
+ /// corresponding elements are equivalent, then neither sequence is
+ /// lexicographically less than the other. If one sequence is a prefix
+ /// of the other, then the shorter sequence is lexicographically less
+ /// than the longer sequence. Otherwise, the lexicographical comparison
+ /// of the sequences yields the same result as the comparison of the first
+ /// corresponding pair of elements that are not equivalent.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename Compare>
+ constexpr auto lexicographical_compare_three_way(InputIterator1 first1, InputIterator1 last1,
+                                                  InputIterator2 first2, InputIterator2 last2,
+                                                  Compare compare) -> decltype(compare(*first1, *first2))
+ {
+     for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
+     {
+         if(auto c = compare(*first1, *first2); c != 0)
+             return c;
+     }
+
+     return (first1 != last1) ? std::strong_ordering::greater :
+            (first2 != last2) ? std::strong_ordering::less :
+                                std::strong_ordering::equal;
+ }
+ #endif
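+
+ /// Example usage (illustrative sketch; requires a compiler with three-way
+ /// comparison support, since this overload is guarded by EA_COMPILER_HAS_THREE_WAY_COMPARISON):
+ ///     const int a[] = { 1, 2 };
+ ///     const int b[] = { 1, 3 };
+ ///     auto order = eastl::lexicographical_compare_three_way(
+ ///         a, a + 2, b, b + 2, [](int x, int y) { return x <=> y; });
+ ///     // order == std::strong_ordering::less, decided at the first non-equal pair (2 vs 3).
+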
+ /// mismatch
+ ///
+ /// Finds the first position where the two ranges [first1, last1) and
+ /// [first2, first2 + (last1 - first1)) differ. The two versions of
+ /// mismatch use different tests for whether elements differ.
+ ///
+ /// Returns: A pair of iterators i and j such that j == first2 + (i - first1)
+ /// and i is the first iterator in the range [first1, last1) for which the
+ /// following corresponding condition holds: !(*i == *(first2 + (i - first1))).
+ /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator
+ /// i is not found.
+ ///
+ /// Complexity: At most 'last1 - first1' applications of the corresponding comparison.
+ ///
+ template <typename InputIterator1, typename InputIterator2>
+ inline eastl::pair<InputIterator1, InputIterator2>
+ mismatch(InputIterator1 first1, InputIterator1 last1,
+          InputIterator2 first2) // , InputIterator2 last2)
+ {
+     while((first1 != last1) && (*first1 == *first2)) // && (first2 != last2) <- The C++ standard mismatch function doesn't check first2/last2.
+     {
+         ++first1;
+         ++first2;
+     }
+
+     return eastl::pair<InputIterator1, InputIterator2>(first1, first2);
+ }
+
+
+ /// mismatch
+ ///
+ /// Finds the first position where the two ranges [first1, last1) and
+ /// [first2, first2 + (last1 - first1)) differ. The two versions of
+ /// mismatch use different tests for whether elements differ.
+ ///
+ /// Returns: A pair of iterators i and j such that j == first2 + (i - first1)
+ /// and i is the first iterator in the range [first1, last1) for which the
+ /// following corresponding condition holds: predicate(*i, *(first2 + (i - first1))) == false.
+ /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator
+ /// i is not found.
+ ///
+ /// Complexity: At most 'last1 - first1' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
+ inline eastl::pair<InputIterator1, InputIterator2>
+ mismatch(InputIterator1 first1, InputIterator1 last1,
+          InputIterator2 first2, // InputIterator2 last2,
+          BinaryPredicate predicate)
+ {
+     while((first1 != last1) && predicate(*first1, *first2)) // && (first2 != last2) <- The C++ standard mismatch function doesn't check first2/last2.
+     {
+         ++first1;
+         ++first2;
+     }
+
+     return eastl::pair<InputIterator1, InputIterator2>(first1, first2);
+ }
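+
+ /// Example usage (illustrative sketch; note that, as documented above, only the
+ /// first range's length is checked, so the second range must be at least as long):
+ ///     const int a[] = { 1, 2, 9, 4 };
+ ///     const int b[] = { 1, 2, 3, 4 };
+ ///     eastl::pair<const int*, const int*> p = eastl::mismatch(a, a + 4, b);
+ ///     // p.first points at 9 and p.second at 3, the first differing position.
+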
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: *j < value.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+     while(d > 0)
+     {
+         ForwardIterator i  = first;
+         DifferenceType  d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+         if(*i < value)
+         {
+             // Disabled because std::lower_bound doesn't specify (23.3.3.3, p3) this can be done: EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+             first = ++i;
+             d    -= d2 + 1;
+         }
+         else
+             d = d2;
+     }
+     return first;
+ }
+
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(*j, value) != false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+     while(d > 0)
+     {
+         ForwardIterator i  = first;
+         DifferenceType  d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+         if(compare(*i, value))
+         {
+             // Disabled because std::lower_bound doesn't specify (23.3.3.1, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+             first = ++i;
+             d    -= d2 + 1;
+         }
+         else
+             d = d2;
+     }
+     return first;
+ }
+
+
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: !(value < *j).
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType len = eastl::distance(first, last);
+
+     while(len > 0)
+     {
+         ForwardIterator i    = first;
+         DifferenceType  len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, len2);
+
+         if(!(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+         {
+             first = ++i;
+             len  -= len2 + 1;
+         }
+         else
+         {
+             // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+             len = len2;
+         }
+     }
+     return first;
+ }
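+
+ /// Example usage (illustrative sketch over a sorted array):
+ ///     const int sorted[] = { 1, 3, 3, 5 };
+ ///     const int* lo = eastl::lower_bound(sorted, sorted + 4, 3); // first 3 (index 1)
+ ///     const int* hi = eastl::upper_bound(sorted, sorted + 4, 3); // the 5 (index 3)
+ ///     // [lo, hi) is the run of elements equivalent to 3; hi - lo == 2.
+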
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(value, *j) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType len = eastl::distance(first, last);
+
+     while(len > 0)
+     {
+         ForwardIterator i    = first;
+         DifferenceType  len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, len2);
+
+         if(!compare(value, *i))
+         {
+             first = ++i;
+             len  -= len2 + 1;
+         }
+         else
+         {
+             // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+             len = len2;
+         }
+     }
+     return first;
+ }
+
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: !(*k < value) && !(value < *k).
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     typedef pair<ForwardIterator, ForwardIterator> ResultType;
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType d = eastl::distance(first, last);
+
+     while(d > 0)
+     {
+         ForwardIterator i(first);
+         DifferenceType  d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, d2);
+
+         if(*i < value)
+         {
+             EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+             first = ++i;
+             d    -= d2 + 1;
+         }
+         else if(value < *i)
+         {
+             EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+             d    = d2;
+             last = i;
+         }
+         else
+         {
+             ForwardIterator j(i);
+
+             return ResultType(eastl::lower_bound(first, i, value),
+                               eastl::upper_bound(++j, last, value));
+         }
+     }
+     return ResultType(first, first);
+ }
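+
+ /// Example usage (illustrative sketch; equivalent to calling lower_bound and
+ /// upper_bound, but sharing part of the binary search between the two):
+ ///     const int sorted[] = { 1, 3, 3, 5 };
+ ///     eastl::pair<const int*, const int*> r = eastl::equal_range(sorted, sorted + 4, 3);
+ ///     // r.first points at the first 3, r.second at the 5; r.second - r.first == 2.
+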
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: compare(*k, value) == false && compare(value, *k) == false.
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+     typedef pair<ForwardIterator, ForwardIterator> ResultType;
+     typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+     DifferenceType d = eastl::distance(first, last);
+
+     while(d > 0)
+     {
+         ForwardIterator i(first);
+         DifferenceType  d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+         eastl::advance(i, d2);
+
+         if(compare(*i, value))
+         {
+             EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+             first = ++i;
+             d    -= d2 + 1;
+         }
+         else if(compare(value, *i))
+         {
+             EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+             d    = d2;
+             last = i;
+         }
+         else
+         {
+             ForwardIterator j(i);
+
+             return ResultType(eastl::lower_bound(first, i, value, compare),
+                               eastl::upper_bound(++j, last, value, compare));
+         }
+     }
+     return ResultType(first, first);
+ }
+
+
+ /// replace
+ ///
+ /// Effects: Substitutes elements referred to by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding condition holds: *i == old_value.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding comparison.
+ ///
+ /// Note: The predicate version of replace is replace_if and not another variation of replace.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void
+ replace(ForwardIterator first, ForwardIterator last, const T& old_value, const T& new_value)
+ {
+     for(; first != last; ++first)
+     {
+         if(*first == old_value)
+             *first = new_value;
+     }
+ }
+
+
+ /// replace_if
+ ///
+ /// Effects: Substitutes elements referred to by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding condition holds: predicate(*i) != false.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_if is replace and not another variation of replace_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename Predicate, typename T>
+ inline void
+ replace_if(ForwardIterator first, ForwardIterator last, Predicate predicate, const T& new_value)
+ {
+     for(; first != last; ++first)
+     {
+         if(predicate(*first))
+             *first = new_value;
+     }
+ }
+
+
+ /// remove_copy
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// *i == value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding comparison.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ remove_copy(InputIterator first, InputIterator last, OutputIterator result, const T& value)
+ {
+     for(; first != last; ++first)
+     {
+         if(!(*first == value)) // Note that we always express value comparisons in terms of < or ==.
+         {
+             *result = eastl::move(*first);
+             ++result;
+         }
+     }
+     return result;
+ }
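+
+ /// Example usage for replace and remove_copy (illustrative sketch):
+ ///     int data[] = { 1, 2, 2, 3 };
+ ///     int out[4] = {};
+ ///     eastl::replace(data, data + 4, 1, 7);                  // data == { 7, 2, 2, 3 }
+ ///     int* end = eastl::remove_copy(data, data + 4, out, 2); // out == { 7, 3 }, end == out + 2
+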
+
+ /// remove_copy_if
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// predicate(*i) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate>
+ inline OutputIterator
+ remove_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+ {
+     for(; first != last; ++first)
+     {
+         if(!predicate(*first))
+         {
+             *result = eastl::move(*first);
+             ++result;
+         }
+     }
+     return result;
+ }
+
+
+ /// remove
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: *i == value.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding comparison.
+ ///
+ /// Note: The predicate version of remove is remove_if and not another variation of remove.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: This function doesn't actually erase elements from the given container;
+ /// it shifts the kept elements toward the front and returns the new logical end.
+ /// The user must call the container's erase function with the return value if the
+ /// goal is to remove the elements from the container.
+ ///
+ /// Example usage:
+ ///     vector<int> intArray;
+ ///     ...
+ ///     intArray.erase(remove(intArray.begin(), intArray.end(), 4), intArray.end()); // Erase all elements of value 4.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ remove(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     first = eastl::find(first, last, value);
+     if(first != last)
+     {
+         ForwardIterator i(first);
+         return eastl::remove_copy(++i, last, first, value);
+     }
+     return first;
+ }
+
+
+ /// remove_if
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: predicate(*i) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of remove_if is remove and not another variation of remove_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: This function doesn't actually erase elements from the given container;
+ /// it shifts the kept elements toward the front and returns the new logical end.
+ /// The user must call the container's erase function with the return value if the
+ /// goal is to remove the elements from the container.
+ ///
+ /// Example usage:
+ ///     vector<int> intArray;
+ ///     ...
+ ///     intArray.erase(remove_if(intArray.begin(), intArray.end(), bind2nd(less<int>(), (int)3)), intArray.end()); // Erase all elements less than 3.
+ ///
+ template <typename ForwardIterator, typename Predicate>
+ inline ForwardIterator
+ remove_if(ForwardIterator first, ForwardIterator last, Predicate predicate)
+ {
+     first = eastl::find_if(first, last, predicate);
+     if(first != last)
+     {
+         ForwardIterator i(first);
+         return eastl::remove_copy_if(++i, last, first, predicate);
+     }
+     return first;
+ }
+
+ /// apply_and_remove_if
+ ///
+ /// Calls the Function function for all elements referred to by the iterator i in the
+ /// range [first, last) for which the following corresponding condition holds:
+ /// predicate(*i) == true,
+ /// and then shifts the remaining (non-matching) elements to the left over them.
+ ///
+ /// Returns: a past-the-end iterator for the new end of the range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate, plus
+ /// one application of function every time the condition holds.
+ ///
+ /// Note: Removing is done by shifting (by means of copy/move assignment) the elements
+ /// in the range in such a way that the elements that are not to be removed appear at the
+ /// beginning of the range. This doesn't actually remove the matching elements from the
+ /// given container; the user must call the container's erase function to do that. I.e.,
+ /// in the same way as for remove_if, the excess elements at the end of the range are
+ /// left in a valid but possibly moved-from state.
+ ///
+ template <typename ForwardIterator, typename Function, typename Predicate>
+ inline ForwardIterator apply_and_remove_if(ForwardIterator first,
+                                            ForwardIterator last,
+                                            Function function,
+                                            Predicate predicate)
+ {
+     first = eastl::find_if(first, last, predicate);
+     if(first != last)
+     {
+         function(*first);
+         for(auto i = next(first); i != last; ++i)
+         {
+             if(predicate(*i))
+             {
+                 function(*i);
+                 continue;
+             }
+             *first = eastl::move(*i);
+             ++first;
+         }
+     }
+     return first;
+ }
+
+
+ /// apply_and_remove
+ ///
+ /// Calls the Function function for all elements referred to by the iterator i in the
+ /// range [first, last) for which the following corresponding condition holds:
+ /// value == *i,
+ /// and then shifts the remaining (non-matching) elements to the left over them.
+ ///
+ /// Returns: a past-the-end iterator for the new end of the range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding equality test,
+ /// plus one application of function every time the condition holds.
+ ///
+ /// Note: Removing is done by shifting (by means of copy/move assignment) the elements
+ /// in the range in such a way that the elements that are not to be removed appear at the
+ /// beginning of the range. This doesn't actually remove the matching elements from the
+ /// given container; the user must call the container's erase function to do that. I.e.,
+ /// in the same way as for remove, the excess elements at the end of the range are
+ /// left in a valid but possibly moved-from state.
+ ///
+ template <typename ForwardIterator, typename Function, typename T>
+ inline ForwardIterator apply_and_remove(ForwardIterator first,
+                                         ForwardIterator last,
+                                         Function function,
+                                         const T& value)
+ {
+     first = eastl::find(first, last, value);
+     if(first != last)
+     {
+         function(*first);
+         for(auto i = next(first); i != last; ++i)
+         {
+             if(value == *i)
+             {
+                 function(*i);
+                 continue;
+             }
+             *first = eastl::move(*i);
+             ++first;
+         }
+     }
+     return first;
+ }
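+
+ /// Example usage (illustrative sketch; visits each matching element once, then
+ /// compacts the non-matching ones to the front, much like remove_if):
+ ///     int data[] = { 1, 2, 3, 4 };
+ ///     int sumOfRemoved = 0; // hypothetical accumulator
+ ///     int* newEnd = eastl::apply_and_remove_if(data, data + 4,
+ ///         [&sumOfRemoved](int v) { sumOfRemoved += v; },
+ ///         [](int v) { return (v % 2) == 0; });
+ ///     // sumOfRemoved == 6; [data, newEnd) == { 1, 3 }; use container erase to shrink.
+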
+ /// replace_copy
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding condition holds: *(first + (i - result)) == old_value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding comparison.
+ ///
+ /// Note: The predicate version of replace_copy is replace_copy_if and not another variation of replace_copy.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ replace_copy(InputIterator first, InputIterator last, OutputIterator result, const T& old_value, const T& new_value)
+ {
+     for(; first != last; ++first, ++result)
+         *result = (*first == old_value) ? new_value : *first;
+     return result;
+ }
+
+
+ /// replace_copy_if
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding condition holds: predicate(*(first + (i - result))) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_copy_if is replace_copy and not another variation of replace_copy_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate, typename T>
+ inline OutputIterator
+ replace_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate, const T& new_value)
+ {
+     for(; first != last; ++first, ++result)
+         *result = predicate(*first) ? new_value : *first;
+     return result;
+ }
+
+
+
+
+ // reverse
+ //
+ // We provide helper functions which allow reverse to be implemented more
+ // efficiently for some types of iterators and types.
+ //
+ template <typename BidirectionalIterator>
+ inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+     for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with a
+         eastl::iter_swap(first, last);                   // generic (bidirectional or otherwise) iterator.
+ }
+
+ template <typename RandomAccessIterator>
+ inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+     if(first != last)
+     {
+         for(; first < --last; ++first)     // With a random access iterator, we can use operator < to more efficiently implement
+             eastl::iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined.
+     }
+ }
+
+ /// reverse
+ ///
+ /// Reverses the values within the range [first, last).
+ ///
+ /// Effects: For each nonnegative integer i <= (last - first) / 2,
+ /// applies swap to all pairs of iterators first + i, (last - i) - 1.
+ ///
+ /// Complexity: Exactly '(last - first) / 2' swaps.
+ ///
+ template <typename BidirectionalIterator>
+ inline void reverse(BidirectionalIterator first, BidirectionalIterator last)
+ {
+     typedef typename eastl::iterator_traits<BidirectionalIterator>::iterator_category IC;
+     eastl::reverse_impl(first, last, IC());
+ }
+
+
+
+ /// reverse_copy
+ ///
+ /// Copies the range [first, last) in reverse order to the result.
+ ///
+ /// Effects: Copies the range [first, last) to the range
+ /// [result, result + (last - first)) such that for any nonnegative
+ /// integer i < (last - first) the following assignment takes place:
+ /// *(result + (last - first) - 1 - i) = *(first + i)
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first))
+ /// shall not overlap.
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename BidirectionalIterator, typename OutputIterator>
+ inline OutputIterator
+ reverse_copy(BidirectionalIterator first, BidirectionalIterator last, OutputIterator result)
+ {
+     for(; first != last; ++result)
+         *result = *--last;
+     return result;
+ }
+
+
+ /// search
+ ///
+ /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
+ /// when compared element-by-element. It returns an iterator pointing to the beginning of that
+ /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
+ /// the C strstr function, with the primary difference being that strstr uses 0-terminated strings
+ /// whereas search uses an end iterator to specify the end of a string.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
+ /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
+ /// *(i + n) == *(first2 + n). Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the corresponding comparison.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ search(ForwardIterator1 first1, ForwardIterator1 last1,
+        ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+     if(first2 != last2) // If there is anything to search for...
+     {
+         // We need to make a special case for a pattern of one element,
+         // as the logic below prevents one-element patterns from working.
+         ForwardIterator2 temp2(first2);
+         ++temp2;
+
+         if(temp2 != last2) // If what we are searching for has a length > 1...
+         {
+             ForwardIterator1 cur1(first1);
+             ForwardIterator2 p2;
+
+             while(first1 != last1)
+             {
+                 // The following loop is the equivalent of eastl::find(first1, last1, *first2)
+                 while((first1 != last1) && !(*first1 == *first2))
+                     ++first1;
+
+                 if(first1 != last1)
+                 {
+                     p2   = temp2;
+                     cur1 = first1;
+
+                     if(++cur1 != last1)
+                     {
+                         while(*cur1 == *p2)
+                         {
+                             if(++p2 == last2)
+                                 return first1;
+
+                             if(++cur1 == last1)
+                                 return last1;
+                         }
+
+                         ++first1;
+                         continue;
+                     }
+                 }
+                 return last1;
+             }
+
+             // Fall through to the end.
+         }
+         else
+             return eastl::find(first1, last1, *first2);
+     }
+
+     return first1;
+
+     #if 0
+     /* Another implementation which is a little simpler but executes a little slower on average.
+     typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
+     typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;
+
+     const difference_type_2 d2 = eastl::distance(first2, last2);
+
+     for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; ++first1, --d1)
+     {
+         ForwardIterator1 temp1 = first1;
+
+         for(ForwardIterator2 temp2 = first2; ; ++temp1, ++temp2)
+         {
+             if(temp2 == last2)
+                 return first1;
+             if(!(*temp1 == *temp2))
+                 break;
+         }
+     }
+
+     return last1;
+     */
+     #endif
+ }
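+
+ /// Example usage (illustrative sketch; the iterator-based analogue of strstr):
+ ///     const char text[]    = "abcabd"; // searched as a plain range, not as a C string
+ ///     const char pattern[] = "abd";
+ ///     const char* pos = eastl::search(text, text + 6, pattern, pattern + 3);
+ ///     // pos == text + 3, the start of the trailing "abd".
+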
+
+ /// search
+ ///
+ /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
+ /// when compared element-by-element. It returns an iterator pointing to the beginning of that
+ /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
+ /// the C strstr function, with the only difference being that strstr uses 0-terminated strings
+ /// whereas search uses an end iterator to specify the end of a string.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
+ /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
+ /// predicate(*(i + n), *(first2 + n)) != false. Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ search(ForwardIterator1 first1, ForwardIterator1 last1,
+        ForwardIterator2 first2, ForwardIterator2 last2,
+        BinaryPredicate predicate)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
+     typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;
+
+     difference_type_2 d2 = eastl::distance(first2, last2);
+
+     if(d2 != 0)
+     {
+         ForwardIterator1 i(first1);
+         eastl::advance(i, d2);
+
+         for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; --d1)
+         {
+             if(eastl::equal(first1, i, first2, predicate))
+                 return first1;
+             if(d1 > d2) // To do: Find a way to make this algorithm more elegant.
+             {
+                 ++first1;
+                 ++i;
+             }
+         }
+         return last1;
+     }
+     return first1; // Just like with strstr, we return first1 if the match string is empty.
+ }
+
+
+
+ // search_n helper functions
+ //
+ template <typename ForwardIterator, typename Size, typename T>
+ ForwardIterator // Generic implementation.
+ search_n_impl(ForwardIterator first, ForwardIterator last, Size count, const T& value, EASTL_ITC_NS::forward_iterator_tag)
+ {
+     if(count <= 0)
+         return first;
+
+     Size d1 = (Size)eastl::distance(first, last); // Should d1 be of type Size, ptrdiff_t, or iterator_traits<ForwardIterator>::difference_type?
+                                                   // The problem with using iterator_traits<ForwardIterator>::difference_type is that
+     if(count > d1)                                // ForwardIterator may not be a true iterator but instead something like a pointer.
+         return last;
+
+     for(; d1 >= count; ++first, --d1)
+     {
+         ForwardIterator i(first);
+
+         for(Size n = 0; n < count; ++n, ++i, --d1)
+         {
+             if(!(*i == value)) // Note that we always express value comparisons in terms of < or ==.
+                 goto not_found;
+         }
+         return first;
+
+         not_found:
+         first = i;
+     }
+     return last;
+ }
+
+ template <typename RandomAccessIterator, typename Size, typename T> inline
+ RandomAccessIterator // Random access iterator implementation. Much faster than the generic implementation.
+ search_n_impl(RandomAccessIterator first, RandomAccessIterator last, Size count, const T& value, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+     if(count <= 0)
+         return first;
+     else if(count == 1)
+         return eastl::find(first, last, value);
+     else if(last > first)
+     {
+         RandomAccessIterator lookAhead;
+         RandomAccessIterator backTrack;
+
+         Size skipOffset = (count - 1);
+         Size tailSize   = (Size)(last - first);
+         Size remainder;
+         Size prevRemainder;
+
+         for(lookAhead = first + skipOffset; tailSize >= count; lookAhead += count)
+         {
+             tailSize -= count;
+
+             if(*lookAhead == value)
+             {
+                 remainder = skipOffset;
+
+                 for(backTrack = lookAhead - 1; *backTrack == value; --backTrack)
+                 {
+                     if(--remainder == 0)
+                         return (lookAhead - skipOffset); // success
+                 }
+
+                 if(remainder <= tailSize)
+                 {
+                     prevRemainder = remainder;
+
+                     while(*(++lookAhead) == value)
+                     {
+                         if(--remainder == 0)
+                             return (backTrack + 1); // success
+                     }
+                     tailSize -= (prevRemainder - remainder);
+                 }
+                 else
+                     return last; // failure
+             }
+
+             // lookAhead here is always pointing to the element of the last mismatch.
+         }
+     }
+
+     return last; // failure
+ }
+
+ /// search_n
+ ///
+ /// Returns: The first iterator i in the range [first, last - count) such that
+ /// for any nonnegative integer n less than count the following corresponding
+ /// conditions hold: *(i + n) == value, pred(*(i + n), value) != false.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * count' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator, typename Size, typename T>
+ ForwardIterator
+ search_n(ForwardIterator first, ForwardIterator last, Size count, const T& value)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+     return eastl::search_n_impl(first, last, count, value, IC());
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first, last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: The reason binary_search returns bool instead of an iterator is
+ /// that search_n, lower_bound, or equal_range already return an iterator.
+ /// However, there are arguments that binary_search should return an iterator.
+ /// Note that we provide binary_search_i (STL extension) to return an iterator.
+ ///
+ /// To use search_n to find an item, do this:
+ ///     iterator i = search_n(begin, end, 1, value);
+ /// To use lower_bound to find an item, do this:
+ ///     iterator i = lower_bound(begin, end, value);
+ ///     if((i != last) && !(value < *i))
+ ///
+ /// It turns out that the above lower_bound method is as fast as binary_search
+ /// would be if it returned an iterator.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     // To do: This can be made slightly faster by not using lower_bound.
+     ForwardIterator i(eastl::lower_bound(first, last, value));
+     return ((i != last) && !(value < *i)); // Note that we always express value comparisons in terms of < or ==.
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first, last) that
+ /// satisfies the corresponding conditions: compare(*i, value) == false &&
+ /// compare(value, *i) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: See comments above regarding the bool return value of binary_search.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+     // To do: This can be made slightly faster by not using lower_bound.
+     ForwardIterator i(eastl::lower_bound(first, last, value, compare));
+     return ((i != last) && !compare(value, *i));
+ }
+
+
+ /// binary_search_i
+ ///
+ /// Returns: an iterator i in the range [first, last) that satisfies the
+ /// corresponding conditions: !(*i < value) && !(value < *i).
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+     // To do: This can be made slightly faster by not using lower_bound.
+     ForwardIterator i(eastl::lower_bound(first, last, value));
+     if((i != last) && !(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+         return i;
+     return last;
+ }
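+
+ /// Example usage (illustrative sketch; binary_search answers "is it there?",
+ /// while the binary_search_i extension also tells you where):
+ ///     const int sorted[] = { 1, 3, 5 };
+ ///     bool found    = eastl::binary_search(sorted, sorted + 3, 3);   // true
+ ///     const int* it = eastl::binary_search_i(sorted, sorted + 3, 4); // == sorted + 3 (not found)
+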
+
+ /// binary_search_i
+ ///
+ /// Returns: an iterator i in the range [first, last) that satisfies the
+ /// corresponding conditions: compare(*i, value) == false &&
+ /// compare(value, *i) == false.
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+     // To do: This can be made slightly faster by not using lower_bound.
+     ForwardIterator i(eastl::lower_bound(first, last, value, compare));
+     if((i != last) && !compare(value, *i))
+         return i;
+     return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// *i == *(i - 1).
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range [first, last) is not empty, exactly '(last - first) - 1'
+ /// applications of the corresponding comparison, otherwise no applications of the comparison.
+ ///
+ /// Example usage:
+ ///     vector<int> intArray;
+ ///     ...
+ ///     intArray.erase(unique(intArray.begin(), intArray.end()), intArray.end());
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last)
+ {
+     first = eastl::adjacent_find(first, last);
+
+     if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+     {
+         ForwardIterator dest(first);
+
+         for(++first; first != last; ++first)
+         {
+             if(!(*dest == *first)) // Note that we always express value comparisons in terms of < or ==.
+                 *++dest = *first;
+         }
+         return ++dest;
+     }
+     return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// predicate(*i, *(i - 1)) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range [first, last) is not empty, exactly '(last - first) - 1'
+ /// applications of the corresponding predicate, otherwise no applications of the predicate.
+ ///
+ template <typename ForwardIterator, typename BinaryPredicate>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+ {
+     first = eastl::adjacent_find(first, last, predicate);
+
+     if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+     {
+         ForwardIterator dest(first);
+
+         for(++first; first != last; ++first)
+         {
+             if(!predicate(*dest, *first))
+                 *++dest = *first;
+         }
+         return ++dest;
+     }
+     return last;
+ }
+
+
+
+ // find_end
+ //
+ // We provide two versions here, one for bidirectional iterators and one for
+ // regular forward iterators. Given that we are searching backward, it's a bit
+ // more efficient if we can use backwards iteration to implement our search,
+ // though this requires an iterator that can be reversed.
+ //
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+               ForwardIterator2 first2, ForwardIterator2 last2,
+               EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+     if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+     {
+         for(ForwardIterator1 result(last1); ; )
+         {
+             const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2));
+
+             if(resultNext != last1) // If another sequence was found...
+             {
+                 first1 = result = resultNext;
+                 ++first1;
+             }
+             else
+                 return result;
+         }
+     }
+     return last1;
+ }
+
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ BidirectionalIterator1
+ find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+               BidirectionalIterator2 first2, BidirectionalIterator2 last2,
+               EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+     typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
+     typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;
+
+     reverse_iterator1 rresult(eastl::search(reverse_iterator1(last1), reverse_iterator1(first1),
+                                             reverse_iterator2(last2), reverse_iterator2(first2)));
+     if(rresult.base() != first1) // If we found something...
+     {
+         BidirectionalIterator1 result(rresult.base());
+
+         eastl::advance(result, -eastl::distance(first2, last2)); // We have an opportunity to optimize this, as the
+         return result;                                           // search function already calculates this distance.
+     }
+     return last1;
+ }
+
+ /// find_end
+ ///
+ /// Finds the last occurrence of the second sequence in the first sequence.
+ /// As such, this function is much like the C string function strrstr and it
+ /// is also the same as a reversed version of 'search'. It is called find_end
+ /// instead of the possibly more consistent search_end simply because the C++
+ /// standard algorithms have such naming.
+ ///
+ /// Returns an iterator between first1 and last1 if the sequence is found.
+ /// Returns last1 (the end of the first sequence) if the sequence is not found.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+          ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+     typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+     return eastl::find_end_impl(first1, last1, first2, last2, IC1(), IC2());
+ }
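+
+ /// Example usage (illustrative sketch; finds the last occurrence, where search
+ /// finds the first):
+ ///     const int data[] = { 1, 2, 1, 2, 3 };
+ ///     const int sub[]  = { 1, 2 };
+ ///     const int* pos = eastl::find_end(data, data + 5, sub, sub + 2);
+ ///     // pos == data + 2, the start of the last { 1, 2 } subsequence.
+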
+
+
+
+ // To consider: Fold the predicate and non-predicate versions of
+ // this algorithm into a single function.
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+               ForwardIterator2 first2, ForwardIterator2 last2,
+               BinaryPredicate predicate,
+               EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+     if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+     {
+         for(ForwardIterator1 result = last1; ; )
+         {
+             const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2, predicate));
+
+             if(resultNext != last1) // If another sequence was found...
+             {
+                 first1 = result = resultNext;
+                 ++first1;
+             }
+             else
+                 return result;
+         }
+     }
+     return last1;
+ }
+
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2, typename BinaryPredicate>
+ BidirectionalIterator1
+ find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+               BidirectionalIterator2 first2, BidirectionalIterator2 last2,
+               BinaryPredicate predicate,
+               EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+     typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
+     typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;
+
+     reverse_iterator1 rresult(eastl::search(reverse_iterator1(last1), reverse_iterator1(first1),
+                                             reverse_iterator2(last2), reverse_iterator2(first2),
+                                             predicate));
+     if(rresult.base() != first1) // If we found something...
+     {
+         BidirectionalIterator1 result(rresult.base());
+         eastl::advance(result, -eastl::distance(first2, last2));
+         return result;
+     }
+     return last1;
+ }
+
+
+ /// find_end
+ ///
+ /// Effects: Finds a subsequence of equal values in a sequence.
+ ///
+ /// Returns: The last iterator i in the range [first1, last1 - (last2 - first2))
+ /// such that for any nonnegative integer n < (last2 - first2), the following
+ /// corresponding conditions hold: pred(*(i + n), *(first2 + n)) != false. Returns
+ /// last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last2 - first2) * (last1 - first1 - (last2 - first2) + 1)
+ /// applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+          ForwardIterator2 first2, ForwardIterator2 last2,
+          BinaryPredicate predicate)
+ {
+     typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+     typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+     return eastl::find_end_impl(first1, last1, first2, last2, predicate, IC1(), IC2());
+ }
+
+
+ /// set_difference
+ ///
+ /// set_difference iterates over both input ranges and copies elements present
+ /// in the first range but not the second to the output range.
+ ///
+ /// Effects: Copies the elements of the range [first1, last1) which are not
+ /// present in the range [first2, last2) to the range beginning at result.
+ /// The elements in the constructed range are sorted.
+ ///
+ /// Requires: The input ranges must be sorted.
+ /// Requires: The output range shall not overlap with either of the original ranges.
+ ///
+ /// Returns: The end of the output range.
+ ///
+ /// Complexity: At most '2 * ((last1 - first1) + (last2 - first2)) - 1' comparisons.
+ ///
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+ OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
+                               InputIterator2 first2, InputIterator2 last2,
+                               OutputIterator result)
+ {
+     while((first1 != last1) && (first2 != last2))
+     {
+         if(*first1 < *first2)
+         {
+             *result = *first1;
+             ++first1;
+             ++result;
+         }
+         else if(*first2 < *first1)
+             ++first2;
+         else
+         {
+             ++first1;
+             ++first2;
+         }
+     }
+
+     return eastl::copy(first1, last1, result);
+ }
+
+
+ template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+ OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
+                               InputIterator2 first2, InputIterator2 last2,
+                               OutputIterator result, Compare compare)
+ {
+     while((first1 != last1) && (first2 != last2))
+     {
+         if(compare(*first1, *first2))
+         {
+             EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+             *result = *first1;
+             ++first1;
+             ++result;
+         }
+         else if(compare(*first2, *first1))
+         {
+             EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+             ++first2;
+         }
+         else
+         {
+             ++first1;
+             ++first2;
+         }
+     }
+
+     return eastl::copy(first1, last1, result);
+ }
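+
+ /// Example usage (illustrative sketch over sorted ranges):
+ ///     const int a[] = { 1, 2, 3, 4 };
+ ///     const int b[] = { 2, 4 };
+ ///     int out[4] = {};
+ ///     int* end = eastl::set_difference(a, a + 4, b, b + 2, out);
+ ///     // out == { 1, 3 }, end == out + 2.
+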
+	/// set_difference_2
+	///
+	/// set_difference_2 iterates over both input ranges and copies elements present
+	/// in the first range but not the second to the first output range and copies
+	/// elements present in the second range but not in the first to the second output
+	/// range.
+	///
+	/// Effects: Copies the elements of the range [first1, last1) which are not
+	/// present in the range [first2, last2) to the first output range beginning at
+	/// result1 AND copies the elements of the range [first2, last2) which are not
+	/// present in the range [first1, last1) to the second output range beginning at result2.
+	/// The elements in the constructed range are sorted.
+	///
+	/// Requires: The input ranges must be sorted.
+	/// Requires: The output ranges shall not overlap with either of the original ranges.
+	///
+	/// Returns: Nothing.
+	///
+	/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+	///
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+	void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+						  InputIterator2 first2, InputIterator2 last2,
+						  OutputIterator result1, OutputIterator result2, Compare compare)
+	{
+		while ((first1 != last1) && (first2 != last2))
+		{
+			if (compare(*first1, *first2))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+				*result1++ = *first1++;
+			}
+			else if (compare(*first2, *first1))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+				*result2++ = *first2++;
+			}
+			else
+			{
+				++first1;
+				++first2;
+			}
+		}
+
+		eastl::copy(first2, last2, result2);
+		eastl::copy(first1, last1, result1);
+	}
+
+	/// set_difference_2
+	///
+	/// set_difference_2 with the default comparison object, eastl::less<>.
+	///
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+	void set_difference_2(InputIterator1 first1, InputIterator1 last1,
+						  InputIterator2 first2, InputIterator2 last2,
+						  OutputIterator result1, OutputIterator result2)
+	{
+		eastl::set_difference_2(first1, last1, first2, last2, result1, result2, eastl::less<>{});
+	}
+
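+	// Illustrative usage sketch (not from the upstream header; container types
+	// are assumptions for the example). Both inputs must already be sorted:
+	//
+	//     eastl::vector<int> a = { 1, 2, 3 };
+	//     eastl::vector<int> b = { 2, 3, 4 };
+	//     eastl::vector<int> onlyA, onlyB;
+	//
+	//     eastl::set_difference_2(a.begin(), a.end(), b.begin(), b.end(),
+	//                             eastl::back_inserter(onlyA),
+	//                             eastl::back_inserter(onlyB));
+	//     // onlyA should hold { 1 } and onlyB should hold { 4 }.
+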
+
+	/// set_symmetric_difference
+	///
+	/// set_symmetric_difference iterates over both input ranges and copies elements
+	/// present in either range but not the other to the output range.
+	///
+	/// Effects: Copies the elements of the range [first1, last1) which are not
+	/// present in the range [first2, last2), and the elements of the range [first2, last2)
+	/// which are not present in the range [first1, last1) to the range beginning at result.
+	/// The elements in the constructed range are sorted.
+	///
+	/// Requires: The input ranges must be sorted.
+	/// Requires: The resulting range shall not overlap with either of the original ranges.
+	///
+	/// Returns: The end of the constructed range.
+	///
+	/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+	///
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+	OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
+											InputIterator2 first2, InputIterator2 last2,
+											OutputIterator result)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(*first1 < *first2)
+			{
+				*result = *first1;
+				++first1;
+				++result;
+			}
+			else if(*first2 < *first1)
+			{
+				*result = *first2;
+				++first2;
+				++result;
+			}
+			else
+			{
+				++first1;
+				++first2;
+			}
+		}
+
+		return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+	}
+
+
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+	OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1,
+											InputIterator2 first2, InputIterator2 last2,
+											OutputIterator result, Compare compare)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(compare(*first1, *first2))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+				*result = *first1;
+				++first1;
+				++result;
+			}
+			else if(compare(*first2, *first1))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+				*result = *first2;
+				++first2;
+				++result;
+			}
+			else
+			{
+				++first1;
+				++first2;
+			}
+		}
+
+		return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+	}
+
+
+	/// set_intersection
+	///
+	/// set_intersection iterates over both ranges and copies elements present in
+	/// both ranges to the output range.
+	///
+	/// Effects: Constructs a sorted intersection of the elements from the
+	/// two ranges; that is, the set of elements that are present in both of the ranges.
+	///
+	/// Requires: The input ranges must be sorted.
+	/// Requires: The resulting range shall not overlap with either of the original ranges.
+	///
+	/// Returns: The end of the constructed range.
+	///
+	/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+	///
+	/// Note: The copying operation is stable; if an element is present in both ranges,
+	/// the one from the first range is copied.
+	///
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+	OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
+									InputIterator2 first2, InputIterator2 last2,
+									OutputIterator result)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(*first1 < *first2)
+				++first1;
+			else if(*first2 < *first1)
+				++first2;
+			else
+			{
+				*result = *first1;
+				++first1;
+				++first2;
+				++result;
+			}
+		}
+
+		return result;
+	}
+
+
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+	OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
+									InputIterator2 first2, InputIterator2 last2,
+									OutputIterator result, Compare compare)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(compare(*first1, *first2))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+				++first1;
+			}
+			else if(compare(*first2, *first1))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+				++first2;
+			}
+			else
+			{
+				*result = *first1;
+				++first1;
+				++first2;
+				++result;
+			}
+		}
+
+		return result;
+	}
+
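+	// Illustrative usage sketch (not from the upstream header; container types
+	// are assumptions for the example). Both inputs must already be sorted:
+	//
+	//     eastl::vector<int> a = { 1, 2, 3, 4 };
+	//     eastl::vector<int> b = { 2, 4, 6 };
+	//     eastl::vector<int> out;
+	//
+	//     eastl::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
+	//                             eastl::back_inserter(out));
+	//     // out should now hold { 2, 4 }.
+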
+
+	/// set_union
+	///
+	/// set_union iterates over both ranges and copies elements present in
+	/// either range to the output range.
+	///
+	/// Effects: Constructs a sorted union of the elements from the two ranges;
+	/// that is, the set of elements that are present in one or both of the ranges.
+	///
+	/// Requires: The input ranges must be sorted.
+	/// Requires: The resulting range shall not overlap with either of the original ranges.
+	///
+	/// Returns: The end of the constructed range.
+	///
+	/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+	///
+	/// Note: The copying operation is stable; if an element is present in both ranges,
+	/// the one from the first range is copied.
+	///
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+	OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
+							 InputIterator2 first2, InputIterator2 last2,
+							 OutputIterator result)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(*first1 < *first2)
+			{
+				*result = *first1;
+				++first1;
+			}
+			else if(*first2 < *first1)
+			{
+				*result = *first2;
+				++first2;
+			}
+			else
+			{
+				*result = *first1;
+				++first1;
+				++first2;
+			}
+			++result;
+		}
+
+		return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+	}
+
+
+	template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+	OutputIterator set_union(InputIterator1 first1, InputIterator1 last1,
+							 InputIterator2 first2, InputIterator2 last2,
+							 OutputIterator result, Compare compare)
+	{
+		while((first1 != last1) && (first2 != last2))
+		{
+			if(compare(*first1, *first2))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+				*result = *first1;
+				++first1;
+			}
+			else if(compare(*first2, *first1))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+				*result = *first2;
+				++first2;
+			}
+			else
+			{
+				*result = *first1;
+				++first1;
+				++first2;
+			}
+			++result;
+		}
+
+		return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+	}
+
+
+	/// set_decomposition
+	///
+	/// set_decomposition iterates over both ranges and copies elements to one of the three
+	/// categories of output ranges.
+	///
+	/// Effects: Constructs three sorted containers of the elements from the two ranges.
+	///             * OutputIterator1 is elements only in Container1.
+	///             * OutputIterator2 is elements only in Container2.
+	///             * OutputIterator3 is elements that are in both Container1 and Container2.
+	///
+	/// Requires: The input ranges must be sorted.
+	/// Requires: The resulting ranges shall not overlap with either of the original ranges.
+	///
+	/// Returns: The end of the constructed range of elements in both Container1 and Container2.
+	///
+	/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
+	///
+	template <typename InputIterator1, typename InputIterator2,
+			  typename OutputIterator1, typename OutputIterator2, typename OutputIterator3, typename Compare>
+	OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1,
+									  InputIterator2 first2, InputIterator2 last2,
+									  OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3, Compare compare)
+	{
+		while ((first1 != last1) && (first2 != last2))
+		{
+			if (compare(*first1, *first2))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
+				*result1++ = *first1++;
+			}
+			else if (compare(*first2, *first1))
+			{
+				EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+				*result2++ = *first2++;
+			}
+			else
+			{
+				*result3++ = *first1++;
+				++first2;
+			}
+		}
+
+		eastl::copy(first1, last1, result1);
+		eastl::copy(first2, last2, result2);
+
+		return result3;
+	}
+
+	/// set_decomposition
+	///
+	/// set_decomposition with the default comparison object, eastl::less<>.
+	///
+	template <typename InputIterator1, typename InputIterator2,
+			  typename OutputIterator1, typename OutputIterator2, typename OutputIterator3>
+	OutputIterator3 set_decomposition(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2,
+									  OutputIterator1 result1, OutputIterator2 result2, OutputIterator3 result3)
+	{
+		return eastl::set_decomposition(first1, last1, first2, last2, result1, result2, result3, eastl::less<>{});
+	}
+
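+	// Illustrative usage sketch (not from the upstream header; container types
+	// are assumptions for the example). Splits two sorted ranges into "only in
+	// a", "only in b", and "in both":
+	//
+	//     eastl::vector<int> a = { 1, 2, 3 };
+	//     eastl::vector<int> b = { 2, 3, 4 };
+	//     eastl::vector<int> onlyA, onlyB, both;
+	//
+	//     eastl::set_decomposition(a.begin(), a.end(), b.begin(), b.end(),
+	//                              eastl::back_inserter(onlyA),
+	//                              eastl::back_inserter(onlyB),
+	//                              eastl::back_inserter(both));
+	//     // Expected: onlyA == { 1 }, onlyB == { 4 }, both == { 2, 3 }.
+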
+
+	/// is_permutation
+	///
+	template <typename ForwardIterator1, typename ForwardIterator2>
+	bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+	{
+		typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type;
+
+		// Skip past any equivalent initial elements.
+		while((first1 != last1) && (*first1 == *first2))
+		{
+			++first1;
+			++first2;
+		}
+
+		if(first1 != last1)
+		{
+			const difference_type first1Size = eastl::distance(first1, last1);
+			ForwardIterator2 last2 = first2;
+			eastl::advance(last2, first1Size);
+
+			for(ForwardIterator1 i = first1; i != last1; ++i)
+			{
+				if(i == eastl::find(first1, i, *i))
+				{
+					const difference_type c = eastl::count(first2, last2, *i);
+
+					if((c == 0) || (c != eastl::count(i, last1, *i)))
+						return false;
+				}
+			}
+		}
+
+		return true;
+	}
+
+	/// is_permutation
+	///
+	template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+	bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, BinaryPredicate predicate)
+	{
+		typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type;
+
+		// Skip past any equivalent initial elements.
+		while((first1 != last1) && predicate(*first1, *first2))
+		{
+			++first1;
+			++first2;
+		}
+
+		if(first1 != last1)
+		{
+			const difference_type first1Size = eastl::distance(first1, last1);
+			ForwardIterator2 last2 = first2;
+			eastl::advance(last2, first1Size);
+
+			for(ForwardIterator1 i = first1; i != last1; ++i)
+			{
+				if(i == eastl::find(first1, i, *i, predicate))
+				{
+					const difference_type c = eastl::count(first2, last2, *i, predicate);
+
+					if((c == 0) || (c != eastl::count(i, last1, *i, predicate)))
+						return false;
+				}
+			}
+		}
+
+		return true;
+	}
+
+
+	/// next_permutation
+	///
+	/// Mutates the range [first, last) to the next permutation. Returns true if the
+	/// new range is not the final permutation (sorted like the starting permutation).
+	/// Permutations start with a sorted range, and false is returned when next_permutation
+	/// results in the initial sorted range, or if the range has <= 1 element.
+	/// Note that elements are compared by operator < (as usual) and that elements deemed
+	/// equal via this are not rearranged.
+	///
+	/// http://marknelson.us/2002/03/01/next-permutation/
+	/// Basically we start with an ordered range and reverse its order one specifically
+	/// chosen swap and reverse at a time. It happens that this requires going through every
+	/// permutation of the range. We use the same variable names as the document above.
+	///
+	/// To consider: Significantly improved permutation/combination functionality:
+	///    http://home.roadrunner.com/~hinnant/combinations.html
+	///
+	/// Example usage:
+	///     vector<int> intArray;
+	///     // <populate intArray>
+	///     sort(intArray.begin(), intArray.end());
+	///     do {
+	///         // <do something with the current permutation>
+	///     } while(next_permutation(intArray.begin(), intArray.end()));
+	///
+
+	template <typename BidirectionalIterator, typename Compare>
+	bool next_permutation(BidirectionalIterator first, BidirectionalIterator last, Compare compare)
+	{
+		if(first != last) // If there is anything in the range...
+		{
+			BidirectionalIterator i = last;
+
+			if(first != --i) // If the range has more than one item...
+			{
+				for(;;)
+				{
+					BidirectionalIterator ii(i), j;
+
+					if(compare(*--i, *ii)) // Find two consecutive values where the first is less than the second.
+					{
+						j = last;
+						while(!compare(*i, *--j)) // Find the final value that's greater than the first (it may be equal to the second).
+							{}
+						eastl::iter_swap(i, j);   // Swap the first and the final.
+						eastl::reverse(ii, last); // Reverse the range from second to last.
+						return true;
+					}
+
+					if(i == first) // There are no two consecutive values where the first is less than the second, meaning the range is in reverse order. The reverse ordered range is always the last permutation.
+					{
+						eastl::reverse(first, last);
+						break; // We are done.
+ } + } + } + } + + return false; + } + + template + bool next_permutation(BidirectionalIterator first, BidirectionalIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + return eastl::next_permutation(first, last, eastl::less()); + } + + + + /// rotate + /// + /// Effects: For each non-negative integer i < (last - first), places the element from the + /// position first + i into position first + (i + (last - middle)) % (last - first). + /// + /// Returns: first + (last - middle). That is, returns where first went to. + /// + /// Remarks: This is a left rotate. + /// + /// Requires: [first,middle) and [middle,last) shall be valid ranges. ForwardIterator shall + /// satisfy the requirements of ValueSwappable (17.6.3.2). The type of *first shall satisfy + /// the requirements of MoveConstructible (Table 20) and the requirements of MoveAssignable. + /// + /// Complexity: At most last - first swaps. + /// + /// Note: While rotate works on ForwardIterators (e.g. slist) and BidirectionalIterators (e.g. list), + /// you can get much better performance (O(1) instead of O(n)) with slist and list rotation by + /// doing splice operations on those lists instead of calling this rotate function. + /// + /// http://www.cs.bell-labs.com/cm/cs/pearls/s02b.pdf / http://books.google.com/books?id=kse_7qbWbjsC&pg=PA14&lpg=PA14&dq=Programming+Pearls+flipping+hands + /// http://books.google.com/books?id=tjOlkl7ecVQC&pg=PA189&lpg=PA189&dq=stepanov+Elements+of+Programming+rotate + /// http://stackoverflow.com/questions/21160875/why-is-stdrotate-so-fast + /// + /// Strategy: + /// - We handle the special case of (middle == first) and (middle == last) no-ops + /// up front in the main rotate entry point. + /// - There's a basic ForwardIterator implementation (rotate_general_impl) which is + /// a fallback implementation that's not as fast as others but works for all cases. + /// - There's a slightly better BidirectionalIterator implementation. + /// - We have specialized versions for rotating elements that are trivially copyable. + /// These versions will use memmove for when we have a RandomAccessIterator. + /// - We have a specialized version for rotating by only a single position, as that allows us + /// (with any iterator type) to avoid a lot of logic involved with algorithms like "flipping hands" + /// and achieve near optimal O(n) behavior. it turns out that rotate-by-one is a common use + /// case in practice. + /// + namespace Internal + { + template + ForwardIterator rotate_general_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + using eastl::swap; + + ForwardIterator current = middle; + + do { + swap(*first++, *current++); + + if(first == middle) + middle = current; + } while(current != last); + + ForwardIterator result = first; + current = middle; + + while(current != last) + { + swap(*first++, *current++); + + if(first == middle) + middle = current; + else if(current == last) + current = middle; + } + + return result; // result points to first + (last - middle). 
+ } + + + template + ForwardIterator move_rotate_left_by_one(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + value_type temp(eastl::move(*first)); + ForwardIterator result = eastl::move(eastl::next(first), last, first); // Note that while our template type is BidirectionalIterator, if the actual + *result = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivially copyable types. + + return result; // result points to the final element in the range. + } + + + template + BidirectionalIterator move_rotate_right_by_one(BidirectionalIterator first, BidirectionalIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + BidirectionalIterator beforeLast = eastl::prev(last); + value_type temp(eastl::move(*beforeLast)); + BidirectionalIterator result = eastl::move_backward(first, beforeLast, last); // Note that while our template type is BidirectionalIterator, if the actual + *first = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivially copyable types. + + return result; // result points to the first element in the range. + } + + template + struct rotate_helper + { + template + static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { return Internal::rotate_general_impl(first, middle, last); } + }; + + template <> + struct rotate_helper + { + template + static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. + return Internal::move_rotate_left_by_one(first, last); + return Internal::rotate_general_impl(first, middle, last); + } + }; + + template <> + struct rotate_helper + { + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { return Internal::rotate_general_impl(first, middle, last); } // rotate_general_impl outperforms the flipping hands algorithm. + + /* + // Simplest "flipping hands" implementation. Disabled because it's slower on average than rotate_general_impl. + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + eastl::reverse(first, middle); + eastl::reverse(middle, last); + eastl::reverse(first, last); + return first + (last - middle); // This can be slow for large ranges because operator + and - are O(n). + } + + // Smarter "flipping hands" implementation, but still disabled because benchmarks are showing it to be slower than rotate_general_impl. + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + // This is the "flipping hands" algorithm. + eastl::reverse_impl(first, middle, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the left side. + eastl::reverse_impl(middle, last, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the right side. + + // Reverse the entire range. + while((first != middle) && (middle != last)) + { + eastl::iter_swap(first, --last); + ++first; + } + + if(first == middle) // Finish reversing the entire range. 
+ { + eastl::reverse_impl(middle, last, bidirectional_iterator_tag()); + return last; + } + else + { + eastl::reverse_impl(first, middle, bidirectional_iterator_tag()); + return first; + } + } + */ + }; + + template <> + struct rotate_helper + { + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. + return Internal::move_rotate_left_by_one(first, last); + if(eastl::next(middle) == last) + return Internal::move_rotate_right_by_one(first, last); + return Internal::rotate_general_impl(first, middle, last); + } + }; + + template + inline Integer greatest_common_divisor(Integer x, Integer y) + { + do { + Integer t = (x % y); + x = y; + y = t; + } while(y); + + return x; + } + + template <> + struct rotate_helper + { + // This is the juggling algorithm, using move operations. + // In practice this implementation is about 25% faster than rotate_general_impl. We may want to + // consider sticking with just rotate_general_impl and avoid the code generation of this function. + template + static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last) + { + typedef typename iterator_traits::difference_type difference_type; + typedef typename iterator_traits::value_type value_type; + + const difference_type m1 = (middle - first); + const difference_type m2 = (last - middle); + const difference_type g = Internal::greatest_common_divisor(m1, m2); + value_type temp; + + for(RandomAccessIterator p = first + g; p != first;) + { + temp = eastl::move(*--p); + RandomAccessIterator p1 = p; + RandomAccessIterator p2 = p + m1; + do + { + *p1 = eastl::move(*p2); + p1 = p2; + const difference_type d = (last - p2); + + if(m1 < d) + p2 += m1; + else + p2 = first + (m1 - d); + } while(p2 != p); + + *p1 = eastl::move(temp); + } + + return first + m2; + } + }; + + template <> + struct rotate_helper + { + // Experiments were done which tested the performance of using an intermediate buffer + // to do memcpy's to as opposed to executing a swapping algorithm. It turns out this is + // actually slower than even rotate_general_impl, partly because the average case involves + // memcpy'ing a quarter of the element range twice. Experiments were done with various kinds + // of PODs with various element counts. + + template + static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. + return Internal::move_rotate_left_by_one(first, last); + if(eastl::next(middle) == last) + return Internal::move_rotate_right_by_one(first, last); + if((last - first) < 32) // For small ranges rotate_general_impl is faster. 
+				return Internal::rotate_general_impl(first, middle, last);
+			return Internal::rotate_helper<EASTL_ITC_NS::random_access_iterator_tag, false>::rotate_impl(first, middle, last);
+		}
+	};
+
+	} // namespace Internal
+
+
+	template <typename ForwardIterator>
+	ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
+	{
+		if(middle != first)
+		{
+			if(middle != last)
+			{
+				typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+				typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+				// The implementations for is_trivially_copyable types simply check whether we have a single element to rotate and, if so,
+				// defer to either move_rotate_left_by_one or move_rotate_right_by_one, which are optimized for trivially copyable types.
+				// Otherwise, use the same implementation as non-trivially copyable types.
+				return Internal::rotate_helper<IC, eastl::is_trivially_copyable<value_type>::value>::rotate_impl(first, middle, last);
+			}
+
+			return first;
+		}
+
+		return last;
+	}
+
+
+
+	/// rotate_copy
+	///
+	/// Similar to rotate except writes the output to the OutputIterator and
+	/// returns an OutputIterator to the element past the last element copied
+	/// (i.e. result + (last - first)).
+	///
+	template <typename ForwardIterator, typename OutputIterator>
+	OutputIterator rotate_copy(ForwardIterator first, ForwardIterator middle, ForwardIterator last, OutputIterator result)
+	{
+		return eastl::copy(first, middle, eastl::copy(middle, last, result));
+	}
+
+
+
+	/// clamp
+	///
+	/// Returns a reference to a clamped value within the range of [lo, hi].
+	///
+	/// http://en.cppreference.com/w/cpp/algorithm/clamp
+	///
+	template <typename T, typename Compare>
+	EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi, Compare comp)
+	{
+		EASTL_ASSERT(!comp(hi, lo));
+		return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
+	}
+
+	template <typename T>
+	EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi)
+	{
+		return eastl::clamp(v, lo, hi, eastl::less<>());
+	}
+
+
+	/// is_partitioned
+	///
+	/// Returns true if the range [first, last) is empty, or if it is partitioned
+	/// by predicate. Being partitioned means that all elements v for which
+	/// predicate(v) evaluates to true appear before any elements for which predicate(v)
+	/// is false.
+	///
+	template <typename InputIterator, typename UnaryPredicate>
+	EA_CONSTEXPR bool is_partitioned(InputIterator first, InputIterator last, UnaryPredicate predicate)
+	{
+		for (; first != last; ++first)
+		{
+			if (!predicate(*first))
+			{
+				// Advance the iterator; we don't need to call the predicate on this item
+				// again in the "false" loop below.
+				++first;
+				break;
+			}
+		}
+		for (; first != last; ++first)
+		{
+			if (predicate(*first))
+			{
+				return false;
+			}
+		}
+		return true;
+	}
+
+	/// partition_point
+	///
+	/// Precondition: for this function to work correctly the input range [first, last)
+	/// must be partitioned by the predicate. i.e. all values for which predicate(v) is
+	/// true should precede any value in the range for which predicate(v) is false.
+	///
+	/// Returns: the iterator past the end of the first partition within [first, last) or
+	/// last if all elements satisfy the predicate.
+	///
+	/// Note: this is a more general version of lower_bound.
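+	///
+	/// Illustrative example (an added sketch, not from the upstream header; the
+	/// container and lambda are assumptions for the example):
+	///
+	///     eastl::vector<int> v = { 2, 4, 6, 1, 3 }; // even values precede odd values
+	///     auto p = eastl::partition_point(v.begin(), v.end(),
+	///                                     [](int n) { return (n % 2) == 0; });
+	///     // p is expected to point at the first odd element (the value 1).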
+ template + EA_CONSTEXPR ForwardIterator partition_point(ForwardIterator first, ForwardIterator last, UnaryPredicate predicate) + { + // Just binary chop our way to the first one where predicate(x) is false + for (auto length = eastl::distance(first, last); 0 < length;) + { + const auto half = length / 2; + const auto middle = eastl::next(first, half); + if (predicate(*middle)) + { + first = eastl::next(middle); + length -= (half + 1); + } + else + { + length = half; + } + } + + return first; + } + +} // namespace eastl + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/allocator.h b/external/EASTL/include/EASTL/allocator.h new file mode 100644 index 00000000..d6454668 --- /dev/null +++ b/external/EASTL/include/EASTL/allocator.h @@ -0,0 +1,397 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ALLOCATOR_H +#define EASTL_ALLOCATOR_H + + +#include +#include +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// alloc_flags + /// + /// Defines allocation flags. + /// + enum alloc_flags + { + MEM_TEMP = 0, // Low memory, not necessarily actually temporary. + MEM_PERM = 1 // High memory, for things that won't be unloaded. + }; + + + /// allocator + /// + /// In this allocator class, note that it is not templated on any type and + /// instead it simply allocates blocks of memory much like the C malloc and + /// free functions. It can be thought of as similar to C++ std::allocator. + /// The flags parameter has meaning that is specific to the allocation + /// + /// C++11's std::allocator (20.6.9) doesn't have a move constructor or assignment + /// operator. This is possibly because std::allocators are associated with types + /// instead of as instances. The potential non-equivalance of C++ std::allocator + /// instances has been a source of some acknowledged design problems. + /// We don't implement support for move construction or assignment in eastl::allocator, + /// but users can define their own allocators which do have move functions and + /// the eastl containers are compatible with such allocators (i.e. nothing unexpected + /// will happen). + /// + class EASTL_API allocator + { + public: + EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME)); + allocator(const allocator& x); + allocator(const allocator& x, const char* pName); + + allocator& operator=(const allocator& x); + + void* allocate(size_t n, int flags = 0); + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); + void deallocate(void* p, size_t n); + + const char* get_name() const; + void set_name(const char* pName); + + protected: + #if EASTL_NAME_ENABLED + const char* mpName; // Debug name, used to track memory. + #endif + }; + + bool operator==(const allocator& a, const allocator& b); +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + bool operator!=(const allocator& a, const allocator& b); +#endif + + + /// dummy_allocator + /// + /// Defines an allocator which does nothing. It returns NULL from allocate calls. 
+ /// + class EASTL_API dummy_allocator + { + public: + EASTL_ALLOCATOR_EXPLICIT dummy_allocator(const char* = NULL) { } + dummy_allocator(const dummy_allocator&) { } + dummy_allocator(const dummy_allocator&, const char*) { } + + dummy_allocator& operator=(const dummy_allocator&) { return *this; } + + void* allocate(size_t, int = 0) { return NULL; } + void* allocate(size_t, size_t, size_t, int = 0) { return NULL; } + void deallocate(void*, size_t) { } + + const char* get_name() const { return ""; } + void set_name(const char*) { } + }; + + inline bool operator==(const dummy_allocator&, const dummy_allocator&) { return true; } +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + inline bool operator!=(const dummy_allocator&, const dummy_allocator&) { return false; } +#endif + + + /// Defines a static default allocator which is constant across all types. + /// This is different from get_default_allocator, which is is bound at + /// compile-time and expected to differ per allocator type. + /// Currently this Default Allocator applies only to CoreAllocatorAdapter. + /// To consider: This naming of this function is too similar to get_default_allocator + /// and instead should be named something like GetStaticDefaultAllocator. + EASTL_API allocator* GetDefaultAllocator(); + EASTL_API allocator* SetDefaultAllocator(allocator* pAllocator); + + + /// get_default_allocator + /// + /// This templated function allows the user to implement a default allocator + /// retrieval function that any part of EASTL can use. EASTL containers take + /// an Allocator parameter which identifies an Allocator class to use. But + /// different kinds of allocators have different mechanisms for retrieving + /// a default allocator instance, and some don't even intrinsically support + /// such functionality. The user can override this get_default_allocator + /// function in order to provide the glue between EASTL and whatever their + /// system's default allocator happens to be. + /// + /// Example usage: + /// MyAllocatorType* gpSystemAllocator; + /// + /// MyAllocatorType* get_default_allocator(const MyAllocatorType*) + /// { return gpSystemAllocator; } + /// + template + Allocator* get_default_allocator(const Allocator*); + + EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*); + + + /// default_allocfreemethod + /// + /// Implements a default allocfreemethod which uses the default global allocator. + /// This version supports only default alignment. + /// + void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/); + + + /// allocate_memory + /// + /// This is a memory allocation dispatching function. + /// To do: Make aligned and unaligned specializations. + /// Note that to do this we will need to use a class with a static + /// function instead of a standalone function like below. + /// + template + void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset); + + +} // namespace eastl + + + + + + +#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere... + + EA_DISABLE_ALL_VC_WARNINGS() + #include + EA_RESTORE_ALL_VC_WARNINGS() + + #if !EASTL_DLL // If building a regular library and not building EASTL as a DLL... + // It is expected that the application define the following + // versions of operator new for the application. Either that or the + // user needs to override the implementation of the allocator class. 
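+	//
+	// Minimal illustrative definitions an application might provide (an added
+	// sketch, not from the upstream header; a production implementation would
+	// honor the requested alignment and the debug parameters rather than
+	// forwarding straight to malloc):
+	//
+	//     void* operator new[](size_t size, const char* /*pName*/, int /*flags*/,
+	//                          unsigned /*debugFlags*/, const char* /*file*/, int /*line*/)
+	//         { return malloc(size); }
+	//
+	//     void* operator new[](size_t size, size_t /*alignment*/, size_t /*alignmentOffset*/,
+	//                          const char* /*pName*/, int /*flags*/, unsigned /*debugFlags*/,
+	//                          const char* /*file*/, int /*line*/)
+	//         { return malloc(size); } // assumption: default alignment suffices here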
+ void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line); + void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line); + #endif + + namespace eastl + { + inline allocator::allocator(const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline allocator::allocator(const allocator& EASTL_NAME(alloc)) + { + #if EASTL_NAME_ENABLED + mpName = alloc.mpName; + #endif + } + + + inline allocator::allocator(const allocator&, const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline allocator& allocator::operator=(const allocator& EASTL_NAME(alloc)) + { + #if EASTL_NAME_ENABLED + mpName = alloc.mpName; + #endif + return *this; + } + + + inline const char* allocator::get_name() const + { + #if EASTL_NAME_ENABLED + return mpName; + #else + return EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline void allocator::set_name(const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName; + #endif + } + + + inline void* allocator::allocate(size_t n, int flags) + { + #if EASTL_NAME_ENABLED + #define pName mpName + #else + #define pName EASTL_ALLOCATOR_DEFAULT_NAME + #endif + + #if EASTL_DLL + return allocate(n, EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT, 0, flags); + #elif (EASTL_DEBUGPARAMS_LEVEL <= 0) + return ::new((char*)0, flags, 0, (char*)0, 0) char[n]; + #elif (EASTL_DEBUGPARAMS_LEVEL == 1) + return ::new( pName, flags, 0, (char*)0, 0) char[n]; + #else + return ::new( pName, flags, 0, __FILE__, __LINE__) char[n]; + #endif + } + + + inline void* allocator::allocate(size_t n, size_t alignment, size_t offset, int flags) + { + #if EASTL_DLL + // We currently have no support for implementing flags when + // using the C runtime library operator new function. The user + // can use SetDefaultAllocator to override the default allocator. + EA_UNUSED(offset); EA_UNUSED(flags); + + size_t adjustedAlignment = (alignment > EA_PLATFORM_PTR_SIZE) ? alignment : EA_PLATFORM_PTR_SIZE; + + void* p = new char[n + adjustedAlignment + EA_PLATFORM_PTR_SIZE]; + void* pPlusPointerSize = (void*)((uintptr_t)p + EA_PLATFORM_PTR_SIZE); + void* pAligned = (void*)(((uintptr_t)pPlusPointerSize + adjustedAlignment - 1) & ~(adjustedAlignment - 1)); + + void** pStoredPtr = (void**)pAligned - 1; + EASTL_ASSERT(pStoredPtr >= p); + *(pStoredPtr) = p; + + EASTL_ASSERT(((size_t)pAligned & ~(alignment - 1)) == (size_t)pAligned); + + return pAligned; + #elif (EASTL_DEBUGPARAMS_LEVEL <= 0) + return ::new(alignment, offset, (char*)0, flags, 0, (char*)0, 0) char[n]; + #elif (EASTL_DEBUGPARAMS_LEVEL == 1) + return ::new(alignment, offset, pName, flags, 0, (char*)0, 0) char[n]; + #else + return ::new(alignment, offset, pName, flags, 0, __FILE__, __LINE__) char[n]; + #endif + + #undef pName // See above for the definition of this. + } + + + inline void allocator::deallocate(void* p, size_t) + { + #if EASTL_DLL + if (p != nullptr) + { + void* pOriginalAllocation = *((void**)p - 1); + delete[](char*)pOriginalAllocation; + } + #else + delete[](char*)p; + #endif + } + + + inline bool operator==(const allocator&, const allocator&) + { + return true; // All allocators are considered equal, as they merely use global new/delete. 
+ } + +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + inline bool operator!=(const allocator&, const allocator&) + { + return false; // All allocators are considered equal, as they merely use global new/delete. + } +#endif + + } // namespace eastl + + +#endif // EASTL_USER_DEFINED_ALLOCATOR + + + +namespace eastl +{ + + template + inline Allocator* get_default_allocator(const Allocator*) + { + return NULL; // By default we return NULL; the user must make specialization of this function in order to provide their own implementation. + } + + + inline EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*) + { + return EASTLAllocatorDefault(); // For the built-in allocator EASTLAllocatorType, we happen to already have a function for returning the default allocator instance, so we provide it. + } + + + inline void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/) + { + EASTLAllocatorType* const pAllocator = EASTLAllocatorDefault(); + + if(pBuffer) // If freeing... + { + EASTLFree(*pAllocator, pBuffer, n); + return NULL; // The return value is meaningless for the free. + } + else // allocating + return EASTLAlloc(*pAllocator, n); + } + + + /// allocate_memory + /// + /// This is a memory allocation dispatching function. + /// To do: Make aligned and unaligned specializations. + /// Note that to do this we will need to use a class with a static + /// function instead of a standalone function like below. + /// + template + inline void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset) + { + void *result; + if (alignment <= EASTL_ALLOCATOR_MIN_ALIGNMENT) + { + result = EASTLAlloc(a, n); + // Ensure the result is correctly aligned. An assertion likely indicates a mismatch between EASTL_ALLOCATOR_MIN_ALIGNMENT and the minimum alignment + // of EASTLAlloc. If there is a mismatch it may be necessary to define EASTL_ALLOCATOR_MIN_ALIGNMENT to be the minimum alignment of EASTLAlloc, or + // to increase the alignment of EASTLAlloc to match EASTL_ALLOCATOR_MIN_ALIGNMENT. + EASTL_ASSERT((reinterpret_cast(result)& ~(alignment - 1)) == reinterpret_cast(result)); + } + else + { + result = EASTLAllocAligned(a, n, alignment, alignmentOffset); + // Ensure the result is correctly aligned. An assertion here may indicate a bug in the allocator. + auto resultMinusOffset = (char*)result - alignmentOffset; + EA_UNUSED(resultMinusOffset); + EASTL_ASSERT((reinterpret_cast(resultMinusOffset)& ~(alignment - 1)) == reinterpret_cast(resultMinusOffset)); + } + return result; + } + +} + + +#endif // Header include guard + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/allocator_malloc.h b/external/EASTL/include/EASTL/allocator_malloc.h new file mode 100644 index 00000000..78f4f69d --- /dev/null +++ b/external/EASTL/include/EASTL/allocator_malloc.h @@ -0,0 +1,130 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ALLOCATOR_MALLOC_H +#define EASTL_ALLOCATOR_MALLOC_H + + +#include +#include +#include + + +// EASTL_ALIGNED_MALLOC_AVAILABLE +// +// Identifies if the standard library provides a built-in aligned version of malloc. +// Defined as 0 or 1, depending on the standard library or platform availability. +// None of the viable C functions provides for an aligned malloc with offset, so we +// don't consider that supported in any case. 
+//
+// Options for aligned allocations:
+//     C11    aligned_alloc    http://linux.die.net/man/3/aligned_alloc
+//     glibc  memalign         http://linux.die.net/man/3/posix_memalign
+//     Posix  posix_memalign   http://pubs.opengroup.org/onlinepubs/000095399/functions/posix_memalign.html
+//     VC++   _aligned_malloc  http://msdn.microsoft.com/en-us/library/8z34s9c6%28VS.80%29.aspx This is not suitable, since it has a limitation that you need to free via _aligned_free.
+//
+#if !defined EASTL_ALIGNED_MALLOC_AVAILABLE
+	#if defined(EA_PLATFORM_POSIX) && !defined(EA_PLATFORM_APPLE)
+		// memalign is more consistently available than posix_memalign, though its location isn't consistent across
+		// platforms and compiler libraries. Typically it's declared in one of three headers: stdlib.h, malloc.h, or malloc/malloc.h
+		#include <stdlib.h> // memalign, posix_memalign.
+		#define EASTL_ALIGNED_MALLOC_AVAILABLE 1
+
+		#if EA_HAS_INCLUDE_AVAILABLE
+			#if EA_HAS_INCLUDE(<malloc/malloc.h>)
+				#include <malloc/malloc.h>
+			#elif EA_HAS_INCLUDE(<malloc.h>)
+				#include <malloc.h>
+			#endif
+		#elif defined(EA_PLATFORM_BSD)
+			#include <stdlib.h>
+		#elif defined(__clang__)
+			#if __has_include(<malloc/malloc.h>)
+				#include <malloc/malloc.h>
+			#elif __has_include(<malloc.h>)
+				#include <malloc.h>
+			#endif
+		#else
+			#include <malloc.h>
+		#endif
+	#else
+		#define EASTL_ALIGNED_MALLOC_AVAILABLE 0
+	#endif
+#endif
+
+
+namespace eastl
+{
+
+	///////////////////////////////////////////////////////////////////////////////
+	// allocator_malloc
+	//
+	// Implements an EASTL allocator that uses malloc/free as opposed to
+	// new/delete or PPMalloc Malloc/Free.
+	//
+	// Example usage:
+	//      vector<int, allocator_malloc> intVector;
+	//
+	class allocator_malloc
+	{
+	public:
+		allocator_malloc(const char* = NULL)
+			{ }
+
+		allocator_malloc(const allocator_malloc&)
+			{ }
+
+		allocator_malloc(const allocator_malloc&, const char*)
+			{ }
+
+		allocator_malloc& operator=(const allocator_malloc&)
+			{ return *this; }
+
+		bool operator==(const allocator_malloc&)
+			{ return true; }
+
+		bool operator!=(const allocator_malloc&)
+			{ return false; }
+
+		void* allocate(size_t n, int /*flags*/ = 0)
+			{ return malloc(n); }
+
+		void* allocate(size_t n, size_t alignment, size_t alignmentOffset, int /*flags*/ = 0)
+		{
+			#if EASTL_ALIGNED_MALLOC_AVAILABLE
+				if((alignmentOffset % alignment) == 0) // We check for (offset % alignment == 0) instead of (offset == 0) because any block which is aligned on e.g. 64 also is aligned at an offset of 64 by definition.
+					return memalign(alignment, n); // memalign is more consistently available than posix_memalign.
+			#else
+				if((alignment <= EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT) && ((alignmentOffset % alignment) == 0))
+					return malloc(n);
+			#endif
+			return NULL;
+		}
+
+		void deallocate(void* p, size_t /*n*/)
+			{ free(p); }
+
+		const char* get_name() const
+			{ return "allocator_malloc"; }
+
+		void set_name(const char*)
+			{ }
+	};
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
diff --git a/external/EASTL/include/EASTL/any.h b/external/EASTL/include/EASTL/any.h
new file mode 100644
index 00000000..ca89e580
--- /dev/null
+++ b/external/EASTL/include/EASTL/any.h
@@ -0,0 +1,664 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the eastl::any which is part of the C++ standard STL
+// library specification.
+//
+// eastl::any is a type-safe container for single values of any type.
Our +// implementation makes use of the "small local buffer" optimization to avoid +// unnecessary dynamic memory allocation if the specified type is eligible to +// be stored in its local buffer. The user type must satisfy the size +// requirements and must be no-throw move-constructible to qualify for the local +// buffer optimization. +// +// To consider: Implement a fixed_any variant to allow users to customize +// the size of the "small local buffer" optimization. +// +// http://en.cppreference.com/w/cpp/utility/any +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ANY_H +#define EASTL_ANY_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +#include +#include +#if EASTL_RTTI_ENABLED + #include +#endif +#if EASTL_EXCEPTIONS_ENABLED + #include +#endif + + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////////// + // bad_any_cast + // + // The type thrown by any_cast on failure. + // + // http://en.cppreference.com/w/cpp/utility/any/bad_any_cast + // + #if EASTL_EXCEPTIONS_ENABLED + struct bad_cast : std::exception + { + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { return "bad cast"; } + }; + + struct bad_any_cast : public bad_cast + { + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { return "bad_any_cast"; } + }; + #endif + + namespace Internal + { + // utility to switch between exceptions and asserts + inline void DoBadAnyCast() + { + #if EASTL_EXCEPTIONS_ENABLED + throw bad_any_cast(); + #else + EASTL_ASSERT_MSG(false, "bad_any_cast\n"); + + // NOTE(rparolin): CRASH! + // You crashed here because you requested a type that was not contained in the object. + // We choose to intentionally crash here instead of returning invalid data to the calling + // code which could cause hard to track down bugs. + *((volatile int*)0) = 0xDEADC0DE; + #endif + } + + template + void* DefaultConstruct(Args&&... args) + { + auto* pMem = EASTLAllocatorDefault()->allocate(sizeof(T), alignof(T), 0); + + return ::new(pMem) T(eastl::forward(args)...); + } + + template + void DefaultDestroy(T* p) + { + p->~T(); + + EASTLAllocatorDefault()->deallocate(static_cast(p), sizeof(T)); + } + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.7.3, class any + // + class any + { + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_operation + // + // operations supported by the storage handler + // + enum class storage_operation + { + GET, + DESTROY, + COPY, + MOVE, + TYPE_INFO + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage + // + // the underlying storage type which enables the switching between objects stored in + // the heap and objects stored within the any type. 
+ // + union storage + { + typedef aligned_storage_t<4 * sizeof(void*), alignment_of::value> internal_storage_t; + + void* external_storage = nullptr; + internal_storage_t internal_storage; + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // use_internal_storage + // + // determines when the "local buffer optimization" is used + // + template + using use_internal_storage = bool_constant + < + is_nothrow_move_constructible::value + && (sizeof(T) <= sizeof(storage)) && + (alignment_of::value % alignment_of::value == 0) + >; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // non-member friend functions + // + template friend const ValueType* any_cast(const any* pAny) EA_NOEXCEPT; + template friend ValueType* any_cast(any* pAny) EA_NOEXCEPT; + template friend ValueType any_cast(const any& operand); + template friend ValueType any_cast(any& operand); + template friend ValueType any_cast(any&& operand); + + //Adding Unsafe any cast operations + template friend const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT; + template friend ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // internal storage handler + // + template + struct storage_handler_internal + { + template + static void construct(storage& s, V&& v) + { + ::new(&s.internal_storage) T(eastl::forward(v)); + } + + template + static void construct_inplace(storage& s, Args... args) + { + ::new(&s.internal_storage) T(eastl::forward(args)...); + } + + template + static void construct_inplace(storage& s, std::initializer_list il, Args&&... args) + { + ::new(&s.internal_storage) NT(il, eastl::forward(args)...); + } + + static inline void destroy(any& refAny) + { + T& t = *static_cast(static_cast(&refAny.m_storage.internal_storage)); + EA_UNUSED(t); + t.~T(); + + refAny.m_handler = nullptr; + } + + static void* get(const any* pThis) + { + EASTL_ASSERT(pThis); + return (void*)(&pThis->m_storage.internal_storage); + } + + static void* handler_func(storage_operation op, const any* pThis, any* pOther) + { + switch (op) + { + case storage_operation::GET: + { + return get(pThis); + } + break; + + case storage_operation::DESTROY: + { + EASTL_ASSERT(pThis); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::COPY: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, *(T*)(&pThis->m_storage.internal_storage)); + } + break; + + case storage_operation::MOVE: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, eastl::move(*(T*)(&pThis->m_storage.internal_storage))); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::TYPE_INFO: + { + #if EASTL_RTTI_ENABLED + return (void*)&typeid(T); + #endif + } + break; + + default: + { + EASTL_ASSERT_MSG(false, "unknown storage operation\n"); + } + break; + }; + + return nullptr; + } + }; + + + + ////////////////////////////////////////////////////////////////////////////////////////// + // external storage handler + // + template + struct storage_handler_external + { + template + static inline void construct(storage& s, V&& v) + { + s.external_storage = Internal::DefaultConstruct(eastl::forward(v)); + } + + template + static inline void construct_inplace(storage& s, Args... 
args) + { + s.external_storage = Internal::DefaultConstruct(eastl::forward(args)...); + } + + template + static inline void construct_inplace(storage& s, std::initializer_list il, Args&&... args) + { + s.external_storage = Internal::DefaultConstruct(il, eastl::forward(args)...); + } + + static inline void destroy(any& refAny) + { + Internal::DefaultDestroy(static_cast(refAny.m_storage.external_storage)); + + refAny.m_handler = nullptr; + } + + static void* get(const any* pThis) + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pThis->m_storage.external_storage); + return static_cast(pThis->m_storage.external_storage); + } + + static void* handler_func(storage_operation op, const any* pThis, any* pOther) + { + switch (op) + { + case storage_operation::GET: + { + return get(pThis); + } + break; + + case storage_operation::DESTROY: + { + EASTL_ASSERT(pThis); + destroy(*const_cast(pThis)); + } + break; + + case storage_operation::COPY: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, *static_cast(pThis->m_storage.external_storage)); + } + break; + + case storage_operation::MOVE: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, eastl::move(*(T*)(pThis->m_storage.external_storage))); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::TYPE_INFO: + { + #if EASTL_RTTI_ENABLED + return (void*)&typeid(T); + #endif + } + break; + + default: + { + EASTL_ASSERT_MSG(false, "unknown storage operation\n"); + } + break; + }; + + return nullptr; + } + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_handler_ptr + // + // defines the function signature of the storage handler that both the internal and + // external storage handlers must implement to retrieve the underlying type of the any + // object. + // + using storage_handler_ptr = void* (*)(storage_operation, const any*, any*); + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_handler + // + // based on the specified type T we select the appropriate underlying storage handler + // based on the 'use_internal_storage' trait. + // + template + using storage_handler = typename conditional::value, + storage_handler_internal, + storage_handler_external>::type; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // data layout + // + storage m_storage; + storage_handler_ptr m_handler; + + public: + #ifndef EA_COMPILER_GNUC + // TODO(rparolin): renable constexpr for GCC + EA_CONSTEXPR + #endif + any() EA_NOEXCEPT + : m_storage(), m_handler(nullptr) {} + + any(const any& other) : m_handler(nullptr) + { + if (other.m_handler) + { + // NOTE(rparolin): You can not simply copy the underlying + // storage because it could hold a pointer to an object on the + // heap which breaks the copy semantics of the language. + other.m_handler(storage_operation::COPY, &other, this); + m_handler = other.m_handler; + } + } + + any(any&& other) EA_NOEXCEPT : m_handler(nullptr) + { + if(other.m_handler) + { + // NOTE(rparolin): You can not simply move the underlying + // storage because because the storage class has effectively + // type erased user type so we have to defer to the handler + // function to get the type back and pass on the move request. 
+ m_handler = eastl::move(other.m_handler); + other.m_handler(storage_operation::MOVE, &other, this); + } + } + + ~any() { reset(); } + + template + any(ValueType&& value, + typename eastl::enable_if::type, any>::value>::type* = 0) + { + typedef decay_t DecayedValueType; + static_assert(is_copy_constructible::value, "ValueType must be copy-constructible"); + storage_handler::construct(m_storage, eastl::forward(value)); + m_handler = &storage_handler::handler_func; + } + + template + explicit any(in_place_type_t, Args&&... args) + { + typedef storage_handler> StorageHandlerT; + static_assert(eastl::is_constructible::value, "T must be constructible with Args..."); + + StorageHandlerT::construct_inplace(m_storage, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + + template + explicit any(in_place_type_t, + std::initializer_list il, + Args&&... args, + typename eastl::enable_if&, Args...>::value, + void>::type* = 0) + { + typedef storage_handler> StorageHandlerT; + + StorageHandlerT::construct_inplace(m_storage, il, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + + // 20.7.3.2, assignments + template + any& operator=(ValueType&& value) + { + static_assert(is_copy_constructible>::value, "ValueType must be copy-constructible"); + any(eastl::forward(value)).swap(*this); + return *this; + } + + any& operator=(const any& other) + { + any(other).swap(*this); + return *this; + } + + any& operator=(any&& other) EA_NOEXCEPT + { + any(eastl::move(other)).swap(*this); + return *this; + } + + // 20.7.3.3, modifiers + #if EASTL_VARIADIC_TEMPLATES_ENABLED + template + typename eastl::enable_if, Args...> && eastl::is_copy_constructible_v>, eastl::decay_t&>::type + emplace(Args&&... args) + { + typedef storage_handler> StorageHandlerT; + + reset(); + StorageHandlerT::construct_inplace(m_storage, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + return *static_cast*>(StorageHandlerT::get(this)); + } + + template + typename eastl::enable_if, std::initializer_list&, Args...> && eastl::is_copy_constructible_v>, eastl::decay_t&>::type + emplace(std::initializer_list il, Args&&... 
args) + { + typedef storage_handler> StorageHandlerT; + + reset(); + StorageHandlerT::construct_inplace(m_storage, il, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + return *static_cast*>(StorageHandlerT::get(this)); + } + #endif + + void reset() EA_NOEXCEPT + { + if(m_handler) + m_handler(storage_operation::DESTROY, this, nullptr); + } + + void swap(any& other) EA_NOEXCEPT + { + if(this == &other) + return; + + if(m_handler && other.m_handler) + { + any tmp; + tmp.m_handler = other.m_handler; + other.m_handler(storage_operation::MOVE, &other, &tmp); + + other.m_handler = m_handler; + m_handler(storage_operation::MOVE, this, &other); + + m_handler = tmp.m_handler; + tmp.m_handler(storage_operation::MOVE, &tmp, this); + } + else if (m_handler == nullptr && other.m_handler) + { + eastl::swap(m_handler, other.m_handler); + m_handler(storage_operation::MOVE, &other, this); + } + else if(m_handler && other.m_handler == nullptr) + { + eastl::swap(m_handler, other.m_handler); + other.m_handler(storage_operation::MOVE, this, &other); + } + //else if (m_handler == nullptr && other.m_handler == nullptr) + //{ + // // nothing to swap + //} + } + + // 20.7.3.4, observers + bool has_value() const EA_NOEXCEPT { return m_handler != nullptr; } + + #if EASTL_RTTI_ENABLED + inline const std::type_info& type() const EA_NOEXCEPT + { + if(m_handler) + { + auto* pTypeInfo = m_handler(storage_operation::TYPE_INFO, this, nullptr); + return *static_cast(pTypeInfo); + } + else + { + return typeid(void); + } + } + #endif + }; + + + + ////////////////////////////////////////////////////////////////////////////////////////// + // 20.7.4, non-member functions + // + inline void swap(any& rhs, any& lhs) EA_NOEXCEPT { rhs.swap(lhs); } + + + ////////////////////////////////////////////////////////////////////////////////////////// + // 20.7.4, The non-member any_cast functions provide type-safe access to the contained object. + // + template + inline ValueType any_cast(const any& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>::type>(&operand); + + if(p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + template + inline ValueType any_cast(any& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>(&operand); + + if(p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + template + inline ValueType any_cast(any&& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>(&operand); + + if (p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + // NOTE(rparolin): The runtime type check was commented out because in DLL builds the templated function pointer + // value will be different -- completely breaking the validation mechanism. Due to the fact that eastl::any uses + // type erasure we can't refresh (on copy/move) the cached function pointer to the internal handler function because + // we don't statically know the type. 
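+	//
+	// Illustrative usage sketch (an added example, not from the upstream header):
+	//
+	//     eastl::any a = 42;                      // int fits in the local buffer
+	//     int*   p = eastl::any_cast<int>(&a);    // pointer form: nullptr on type mismatch
+	//     int    v = eastl::any_cast<int>(a);     // value form: throws/asserts on mismatch
+	//     float* q = eastl::any_cast<float>(&a);  // q == nullptr; the held type is int
+	//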
+ template + inline const ValueType* any_cast(const any* pAny) EA_NOEXCEPT + { + return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler>::handler_func) + #if EASTL_RTTI_ENABLED + && pAny->type() == typeid(typename remove_reference::type) + #endif + ) ? + static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) : + nullptr; + } + + template + inline ValueType* any_cast(any* pAny) EA_NOEXCEPT + { + return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler>::handler_func) + #if EASTL_RTTI_ENABLED + && pAny->type() == typeid(typename remove_reference::type) + #endif + ) ? + static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) : + nullptr; + } + + //Unsafe operations - use with caution + template + inline const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT + { + return unsafe_any_cast(const_cast(pAny)); + } + + template + inline ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT + { + return static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)); + } + + ////////////////////////////////////////////////////////////////////////////////////////// + // make_any + // + #if EASTL_VARIADIC_TEMPLATES_ENABLED + template + inline any make_any(Args&&... args) + { + return any(eastl::in_place, eastl::forward(args)...); + } + + template + inline any make_any(std::initializer_list il, Args&&... args) + { + return any(eastl::in_place, il, eastl::forward(args)...); + } + #endif + +} // namespace eastl + +#endif // EASTL_ANY_H diff --git a/external/EASTL/include/EASTL/array.h b/external/EASTL/include/EASTL/array.h new file mode 100644 index 00000000..64297fc0 --- /dev/null +++ b/external/EASTL/include/EASTL/array.h @@ -0,0 +1,691 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Implements a templated array class as per the C++ standard TR1 (technical +// report 1, which is a list of proposed C++ library amendments). +// The primary distinctions between this array and TR1 array are: +// - array::size_type is defined as eastl_size_t instead of size_t in order +// to save memory and run faster on 64 bit systems. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ARRAY_H +#define EASTL_ARRAY_H + + +#include +#include +#include +#include +#include +#include + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include // std::out_of_range, std::length_error. + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +// 4512/4626 - 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere. +EA_DISABLE_VC_WARNING(4512 4626); + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + /// array + /// + /// Implements a templated array class as per the C++ standard TR1. + /// This class allows you to use a built-in C style array like an STL vector. + /// It does not let you change its size, as it is just like a C built-in array. 
+	/// Our implementation here strives to remove function call nesting, as that
+	/// makes it hard for us to profile debug builds due to function call overhead.
+	/// Note that this is intentionally a struct with public data, as per the
+	/// C++ standard update proposal requirements.
+	///
+	/// Example usage:
+	///    array<int, 5> a = { { 0, 1, 2, 3, 4 } }; // Strict compilers such as GCC require the double brackets.
+	///    a[2] = 4;
+	///    for(array<int, 5>::iterator i = a.begin(); i < a.end(); ++i)
+	///       *i = 0;
+	///
+	template <typename T, size_t N = 1>
+	struct array
+	{
+	public:
+		typedef array<T, N>                               this_type;
+		typedef T                                         value_type;
+		typedef value_type&                               reference;
+		typedef const value_type&                         const_reference;
+		typedef value_type*                               iterator;
+		typedef const value_type*                         const_iterator;
+		typedef eastl::reverse_iterator<iterator>         reverse_iterator;
+		typedef eastl::reverse_iterator<const_iterator>   const_reverse_iterator;
+		typedef eastl_size_t                              size_type;        // See config.h for the definition of eastl_size_t, which defaults to size_t.
+		typedef ptrdiff_t                                 difference_type;
+
+		enum
+		{
+			count EASTL_REMOVE_AT_2024_APRIL = N
+		};
+
+		// Note that the member data is intentionally public.
+		// This allows for aggregate initialization of the
+		// object (e.g. array<int, 4> a = { 0, 3, 2, 4 }; )
+		// do not use this member directly (use data() instead).
+		value_type mValue[N];
+
+		// We intentionally provide no constructor, destructor, or assignment operator.
+
+		void fill(const value_type& value);
+
+		// Unlike the swap function for other containers, array::swap takes linear time,
+		// may exit via an exception, and does not cause iterators to become associated with the other container.
+		void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value);
+
+		EA_CPP14_CONSTEXPR iterator       begin() EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR iterator       end() EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_iterator end() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_iterator cend() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR reverse_iterator       rbegin() EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR reverse_iterator       rend() EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR bool      empty() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR size_type size() const EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR size_type max_size() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR T*       data() EA_NOEXCEPT;
+		EA_CPP14_CONSTEXPR const T* data() const EA_NOEXCEPT;
+
+		EA_CPP14_CONSTEXPR reference       operator[](size_type i);
+		EA_CPP14_CONSTEXPR const_reference operator[](size_type i) const;
+		EA_CPP14_CONSTEXPR const_reference at(size_type i) const;
+		EA_CPP14_CONSTEXPR reference       at(size_type i);
+
+		EA_CPP14_CONSTEXPR reference       front();
+		EA_CPP14_CONSTEXPR const_reference front() const;
+
+		EA_CPP14_CONSTEXPR reference       back();
+		EA_CPP14_CONSTEXPR const_reference back() const;
+
+		bool validate() const;
+		int  validate_iterator(const_iterator i) const;
+
+	}; // class array
+
+	// declaring a C-style array of size 0 is not valid C++.
+	// thus, we have to declare this partial specialization:
+	template <typename T>
+	struct array<T, 0>
+	{
+	public:
+		typedef array<T, 0>                               this_type;
+		typedef T                                         value_type;
+		typedef value_type&                               reference;
+		typedef const value_type&                         const_reference;
+		typedef value_type*                               iterator;
+		typedef const value_type*                         const_iterator;
+		typedef eastl::reverse_iterator<iterator>         reverse_iterator;
+		typedef eastl::reverse_iterator<const_iterator>   const_reverse_iterator;
+		typedef eastl_size_t                              size_type;        // See config.h for the definition of eastl_size_t, which defaults to size_t.
+		typedef ptrdiff_t                                 difference_type;
+
+		enum
+		{
+			count EASTL_REMOVE_AT_2024_APRIL = 0
+		};
+
+		// We intentionally provide no constructor, destructor, or assignment operator.
+
+		void fill(const value_type&) {}
+
+		// Unlike the swap function for other containers, array::swap takes linear time,
+		// may exit via an exception, and does not cause iterators to become associated with the other container.
+		void swap(this_type&) EA_NOEXCEPT {}
+
+		EA_CPP14_CONSTEXPR iterator       begin() EA_NOEXCEPT { return nullptr; }
+		EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT { return nullptr; }
+		EA_CPP14_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT { return nullptr; }
+
+		EA_CPP14_CONSTEXPR iterator       end() EA_NOEXCEPT { return nullptr; }
+		EA_CPP14_CONSTEXPR const_iterator end() const EA_NOEXCEPT { return nullptr; }
+		EA_CPP14_CONSTEXPR const_iterator cend() const EA_NOEXCEPT { return nullptr; }
+
+		EA_CPP14_CONSTEXPR reverse_iterator       rbegin() EA_NOEXCEPT { return reverse_iterator(nullptr); }
+		EA_CPP14_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(nullptr); }
+		EA_CPP14_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(nullptr); }
+
+		EA_CPP14_CONSTEXPR reverse_iterator       rend() EA_NOEXCEPT { return reverse_iterator(nullptr); }
+		EA_CPP14_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(nullptr); }
+		EA_CPP14_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(nullptr); }
+
+		EA_CPP14_CONSTEXPR bool      empty() const EA_NOEXCEPT { return true; }
+		EA_CPP14_CONSTEXPR size_type size() const EA_NOEXCEPT { return 0; }
+		EA_CPP14_CONSTEXPR size_type max_size() const EA_NOEXCEPT { return 0; }
+
+		EA_CPP14_CONSTEXPR T*       data() EA_NOEXCEPT { return nullptr; }
+		EA_CPP14_CONSTEXPR const T* data() const EA_NOEXCEPT { return nullptr; }
+
+		EA_CPP14_CONSTEXPR reference       operator[](size_type) { return *data(); }
+		EA_CPP14_CONSTEXPR const_reference operator[](size_type) const { return *data(); }
+
+		EA_DISABLE_VC_WARNING(4702); // unreachable code
+		EA_CPP14_CONSTEXPR const_reference at(size_type) const
+		{
+#if EASTL_EXCEPTIONS_ENABLED
+			throw std::out_of_range("array::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+			EASTL_FAIL_MSG("array::at -- out of range");
+#endif
+			return *data();
+		}
+		EA_RESTORE_VC_WARNING();
+
+		EA_DISABLE_VC_WARNING(4702); // unreachable code
+		EA_CPP14_CONSTEXPR reference at(size_type)
+		{
+#if EASTL_EXCEPTIONS_ENABLED
+			throw std::out_of_range("array::at -- out of range");
+#elif EASTL_ASSERT_ENABLED
+			EASTL_FAIL_MSG("array::at -- out of range");
+#endif
+			return *data();
+		}
+		EA_RESTORE_VC_WARNING();
+
+		EA_CPP14_CONSTEXPR reference       front() { return *data(); }
+		EA_CPP14_CONSTEXPR const_reference front() const { return *data(); }
+
+		EA_CPP14_CONSTEXPR reference       back() { return *data(); }
+		EA_CPP14_CONSTEXPR const_reference back() const { return *data(); }
+
+		bool validate() const { return true; }
+		int  validate_iterator(const_iterator) const { return isf_none; }
+
+	}; // class array
+
+
+
+	///////////////////////////////////////////////////////////////////////////
+	// template deduction guides
+	///////////////////////////////////////////////////////////////////////////
+	#ifdef __cpp_deduction_guides
+		template <class T, class... U> array(T, U...) -> array<T, 1 + sizeof...(U)>;
+	#endif
+
+
+	///////////////////////////////////////////////////////////////////////
+	// array
+	///////////////////////////////////////////////////////////////////////
+
+
+	template <typename T, size_t N>
+	inline void array<T, N>::fill(const value_type& value)
+	{
+		eastl::fill_n(&mValue[0], N, value);
+	}
+
+
+	template <typename T, size_t N>
+	inline void array<T, N>::swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value)
+	{
+		eastl::swap_ranges(&mValue[0], &mValue[N], &x.mValue[0]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::iterator
+	array<T, N>::begin() EA_NOEXCEPT
+	{
+		return &mValue[0];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+	array<T, N>::begin() const EA_NOEXCEPT
+	{
+		return &mValue[0];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+	array<T, N>::cbegin() const EA_NOEXCEPT
+	{
+		return &mValue[0];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::iterator
+	array<T, N>::end() EA_NOEXCEPT
+	{
+		return &mValue[N];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+	array<T, N>::end() const EA_NOEXCEPT
+	{
+		return &mValue[N];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_iterator
+	array<T, N>::cend() const EA_NOEXCEPT
+	{
+		return &mValue[N];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reverse_iterator
+	array<T, N>::rbegin() EA_NOEXCEPT
+	{
+		return reverse_iterator(&mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+	array<T, N>::rbegin() const EA_NOEXCEPT
+	{
+		return const_reverse_iterator(&mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+	array<T, N>::crbegin() const EA_NOEXCEPT
+	{
+		return const_reverse_iterator(&mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reverse_iterator
+	array<T, N>::rend() EA_NOEXCEPT
+	{
+		return reverse_iterator(&mValue[0]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+	array<T, N>::rend() const EA_NOEXCEPT
+	{
+		return const_reverse_iterator(static_cast<const_iterator>(&mValue[0]));
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reverse_iterator
+	array<T, N>::crend() const EA_NOEXCEPT
+	{
+		return const_reverse_iterator(static_cast<const_iterator>(&mValue[0]));
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::size_type
+	array<T, N>::size() const EA_NOEXCEPT
+	{
+		return (size_type)N;
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::size_type
+	array<T, N>::max_size() const EA_NOEXCEPT
+	{
+		return (size_type)N;
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool array<T, N>::empty() const EA_NOEXCEPT
+	{
+		return (N == 0);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+	array<T, N>::operator[](size_type i)
+	{
+		#if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			if (EASTL_UNLIKELY(i >= N))
+				EASTL_FAIL_MSG("array::operator[] -- out of range");
+		#elif EASTL_ASSERT_ENABLED
+			// We allow taking a reference to arr[0]
+			if (EASTL_UNLIKELY((i != 0) && i >= N))
+				EASTL_FAIL_MSG("array::operator[] -- out of range");
+		#endif
+
+		return mValue[i];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+	array<T, N>::operator[](size_type i) const
+	{
+		#if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			if (EASTL_UNLIKELY(i >= N))
+				EASTL_FAIL_MSG("array::operator[] -- out of range");
+		#elif EASTL_ASSERT_ENABLED
+			// We allow taking a reference to arr[0]
+			if (EASTL_UNLIKELY((i != 0) && i >= N))
+				EASTL_FAIL_MSG("array::operator[] -- out of range");
+		#endif
+
+		return mValue[i];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+	array<T, N>::front()
+	{
+		return mValue[0];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+	array<T, N>::front() const
+	{
+		return mValue[0];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
+	array<T, N>::back()
+	{
+		return mValue[N - 1];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+	array<T, N>::back() const
+	{
+		return mValue[N - 1];
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline T* array<T, N>::data() EA_NOEXCEPT
+	{
+		return mValue;
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline const T* array<T, N>::data() const EA_NOEXCEPT
+	{
+		return mValue;
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference array<T, N>::at(size_type i) const
+	{
+		#if EASTL_EXCEPTIONS_ENABLED
+			if(EASTL_UNLIKELY(i >= N))
+				throw std::out_of_range("array::at -- out of range");
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(i >= N))
+				EASTL_FAIL_MSG("array::at -- out of range");
+		#endif
+
+		return static_cast<const_reference>(mValue[i]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::reference array<T, N>::at(size_type i)
+	{
+		#if EASTL_EXCEPTIONS_ENABLED
+			if(EASTL_UNLIKELY(i >= N))
+				throw std::out_of_range("array::at -- out of range");
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(i >= N))
+				EASTL_FAIL_MSG("array::at -- out of range");
+		#endif
+
+		return static_cast<reference>(mValue[i]);
+	}
+
+
+	template <typename T, size_t N>
+	inline bool array<T, N>::validate() const
+	{
+		return true; // There is nothing to do.
+	}
+
+
+	template <typename T, size_t N>
+	inline int array<T, N>::validate_iterator(const_iterator i) const
+	{
+		if(i >= mValue)
+		{
+			if(i < (mValue + N))
+				return (isf_valid | isf_current | isf_can_dereference);
+
+			if(i <= (mValue + N))
+				return (isf_valid | isf_current);
+		}
+
+		return isf_none;
+	}
+
+
+
+	///////////////////////////////////////////////////////////////////////
+	// global operators
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator==(const array<T, N>& a, const array<T, N>& b)
+	{
+		return eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+	}
+
+#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+	template <typename T, size_t N>
+	inline synth_three_way_result<T> operator<=>(const array<T, N>& a, const array<T, N>& b)
+	{
+		return eastl::lexicographical_compare_three_way(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N], synth_three_way{});
+	}
+#else
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator<(const array<T, N>& a, const array<T, N>& b)
+	{
+		return eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator!=(const array<T, N>& a, const array<T, N>& b)
+	{
+		return !eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator>(const array<T, N>& a, const array<T, N>& b)
+	{
+		return eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator<=(const array<T, N>& a, const array<T, N>& b)
+	{
+		return !eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
+	}
+
+
+	template <typename T, size_t N>
+	EA_CPP14_CONSTEXPR inline bool operator>=(const array<T, N>& a, const array<T, N>& b)
+	{
+		return !eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
+	}
+#endif
+
+	///////////////////////////////////////////////////////////////////////
+	// non-member functions
+	///////////////////////////////////////////////////////////////////////
+
+	template <size_t I, typename T, size_t N>
+	EA_NODISCARD EA_CONSTEXPR T& get(array<T, N>& value) EA_NOEXCEPT
+	{
+		static_assert(I < N, "array index out of bounds");
+		return value.mValue[I];
+	}
+
+	template <size_t I, typename T, size_t N>
+	EA_NODISCARD EA_CONSTEXPR T&& get(array<T, N>&& value) EA_NOEXCEPT
+	{
+		static_assert(I < N, "array index out of bounds");
+		return move(value.mValue[I]);
+	}
+
+	template <size_t I, typename T, size_t N>
+	EA_NODISCARD EA_CONSTEXPR const T& get(const array<T, N>& value) EA_NOEXCEPT
+	{
+		static_assert(I < N, "array index out of bounds");
+		return value.mValue[I];
+	}
+
+	template <size_t I, typename T, size_t N>
+	EA_NODISCARD EA_CONSTEXPR const T&& get(const array<T, N>&& value) EA_NOEXCEPT
+	{
+		static_assert(I < N, "array index out of bounds");
+		return move(value.mValue[I]);
+	}
+
+	template <typename T, size_t N>
+	inline void swap(array<T, N>& a, array<T, N>& b)
+	{
+		eastl::swap_ranges(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
+	}
+
+
+	///////////////////////////////////////////////////////////////////////
+	// to_array
+	///////////////////////////////////////////////////////////////////////
+	namespace internal
+	{
+		template <class T, size_t N, size_t... I>
+		EA_CONSTEXPR auto to_array(T (&a)[N], index_sequence<I...>)
+		{
+			return eastl::array<eastl::remove_cv_t<T>, N>{{a[I]...}};
+		}
+
+		template <class T, size_t N, size_t... I>
+		EA_CONSTEXPR auto to_array(T (&&a)[N], index_sequence<I...>)
+		{
+			return eastl::array<eastl::remove_cv_t<T>, N>{{eastl::move(a[I])...}};
+		}
+	}
+
+	template <class T, size_t N>
+	EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&a)[N])
+	{
+		static_assert(eastl::is_constructible_v<T, T&>, "element type T must be copy-initializable");
+		static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+		return internal::to_array(a, eastl::make_index_sequence<N>{});
+	}
+
+	template <class T, size_t N>
+	EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&&a)[N])
+	{
+		static_assert(eastl::is_move_constructible_v<T>, "element type T must be move-constructible");
+		static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+		return internal::to_array(eastl::move(a), eastl::make_index_sequence<N>{});
+	}
+
+#if EASTL_TUPLE_ENABLED
+
+	///////////////////////////////////////////////////////////////////////
+	// helper classes
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename T, size_t N>
+	struct tuple_size<array<T, N>> : public integral_constant<size_t, N> {};
+
+	namespace internal {
+		template <size_t I, typename T>
+		struct tuple_element {};
+
+		template <size_t I, typename T, size_t N>
+		struct tuple_element<I, array<T, N>> {
+			using type = T;
+		};
+	}
+
+	template <size_t I, typename T, size_t N>
+	struct tuple_element<I, array<T, N>> : internal::tuple_element<I, array<T, N>> {};
+
+#endif // EASTL_TUPLE_ENABLED
+} // namespace eastl
+
+///////////////////////////////////////////////////////////////////////
+// C++17 structured bindings support for eastl::array
+///////////////////////////////////////////////////////////////////////
+
+#ifndef EA_COMPILER_NO_STRUCTURED_BINDING
+// we can't forward declare tuple_size and tuple_element because some std implementations
+// don't declare it in the std namespace, but instead alias it.
+#include <tuple>
+
+namespace std
+{
+
+template <typename T, size_t N>
+struct tuple_size<eastl::array<T, N>> : public integral_constant<size_t, N> {};
+
+template <size_t I, typename T, size_t N>
+struct tuple_element<I, eastl::array<T, N>> : public eastl::tuple_element<I, eastl::array<T, N>> {};
+}
+#endif
+
+
+EA_RESTORE_VC_WARNING();
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/atomic.h b/external/EASTL/include/EASTL/atomic.h
new file mode 100644
index 00000000..27117e9c
--- /dev/null
+++ b/external/EASTL/include/EASTL/atomic.h
@@ -0,0 +1,1772 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_H
+#define EASTL_ATOMIC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Below is the documentation of the API of the eastl::atomic<T> library.
+// This includes class and free functions.
+// Anything marked with a '+' in front of the name is an extension to the std API.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic memory_order API
+//
+// See below for full explanations on the memory orders and their guarantees.
+//
+// - eastl::memory_order_relaxed
+// - eastl::memory_order_acquire
+// - eastl::memory_order_release
+// - eastl::memory_order_acq_rel
+// - eastl::memory_order_seq_cst
+// - +eastl::memory_order_read_depends
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic<T> class API
+//
+// All jargon and prerequisite knowledge is explained below.
+//
+// Unless otherwise specified, all orders except read_depends are valid
+// on a given operation.
+// Unless otherwise specified, all operations are valid on all types T.
+// If no order is provided, seq_cst memory ordering is used for the operation.
+//
+// - atomic()    : Value-initializes the underlying object as T{}.
+//
+// - atomic(T)   : Initializes the underlying object with a copy of T.
+//
+// - T operator=(T) : Atomically assigns T as store(T, seq_cst).
+//
+// - is_lock_free() : true if the operations are lockfree. Always true for eastl.
+//
+// - store(T, order) : Atomically stores T affecting memory according to order.
+//                   : Valid orders are relaxed, release, and seq_cst.
+//
+// - T load(order) : Atomically loads T affecting memory according to order.
+//                 : Valid orders are relaxed, acquire, and seq_cst.
+//                 : If T is a pointer type, read_depends is another valid order.
+//
+// - operator T() : Atomically loads T as load(T, seq_cst).
+//
+// - T exchange(T, order) : Atomically performs a RMW that replaces the current value with T.
+//                        : Memory is affected according to order.
+//                        : Returns the previous value stored before the RMW operation.
+//
+// - bool compare_exchange_weak(T&, T, successOrder, failOrder)
+//   : Atomically compares the value stored with that of T& and if equal replaces it with T.
+//   : This is a RMW operation.
+//   : If the comparison fails, loads the observed value into T&. This is a load operation.
+//   : Memory is affected in the RMW operation according to successOrder.
+//   : Memory is affected in the load operation according to failOrder.
+//   : failOrder cannot be a stronger order than successOrder.
+//   : Returns true or false if the comparison succeeded and T was stored into the atomic object.
+//   :
+//   : The weak variant may fail even if the observed value of the atomic object equals T&.
+//   : This can yield performance gains on platforms with ld/str exclusive pair instructions, especially
+//   : when the compare_exchange operation is done in a loop.
+//   : Only the bool return value can be used to determine if the operation was successful.
+//
+// - bool compare_exchange_weak(T&, T, order)
+//   : Same as the above except that order is used for both the RMW and the load operation.
+//   : If order == acq_rel then the order of the load operation equals acquire.
+//   : If order == release then the order of the load operation equals relaxed.
+//
+// - bool compare_exchange_strong(T&, T, successOrder, failOrder)
+// - bool compare_exchange_strong(T&, T, order)
+//   : These operations are the same as the above weak variants,
+//   : except that they will not fail spuriously if the value stored equals T&.
+//
+// The below operations are only valid for Integral types.
+//
+// - T fetch_add(T, order)
+//   : Atomically performs a RMW that increments the value stored with T.
+//   : Returns the previous value stored before the RMW operation.
+// - T fetch_sub(T, order)
+//   : Atomically performs a RMW that decrements the value stored with T.
+//   : Returns the previous value stored before the RMW operation.
+// - T fetch_and(T, order)
+//   : Atomically performs a RMW that bit-wise and's the value stored with T.
+//   : Returns the previous value stored before the RMW operation.
+// - T fetch_or(T, order)
+//   : Atomically performs a RMW that bit-wise or's the value stored with T.
+//   : Returns the previous value stored before the RMW operation.
+// - T fetch_xor(T, order)
+//   : Atomically performs a RMW that bit-wise xor's the value stored with T.
+//   : Returns the previous value stored before the RMW operation.
+//
+// - +T add_fetch(T, order)
+//   : Atomically performs a RMW that increments the value stored with T.
+//   : Returns the new updated value after the operation.
+// - +T sub_fetch(T, order)
+//   : Atomically performs a RMW that decrements the value stored with T.
+//   : Returns the new updated value after the operation.
+// - +T and_fetch(T, order)
+//   : Atomically performs a RMW that bit-wise and's the value stored with T.
+//   : Returns the new updated value after the operation.
+// - +T or_fetch(T, order)
+//   : Atomically performs a RMW that bit-wise or's the value stored with T.
+//   : Returns the new updated value after the operation.
+// - +T xor_fetch(T, order)
+//   : Atomically performs a RMW that bit-wise xor's the value stored with T.
+//   : Returns the new updated value after the operation.
+//
+// - T operator++/--()
+//   : Atomically increments or decrements the atomic value by one.
+//   : Returns the previous value stored before the RMW operation.
+//   : Memory is affected according to seq_cst ordering.
+//
+// - T ++/--operator()
+//   : Atomically increments or decrements the atomic value by one.
+//   : Returns the new updated value after the RMW operation.
+//   : Memory is affected according to seq_cst ordering.
+//
+// - T operator+=/-=/&=/|=/^=(T)
+//   : Atomically adds, subtracts, bitwise and/or/xor the atomic object with T.
+//   : Returns the new updated value after the operation.
+//   : Memory is affected according to seq_cst ordering.
+//
+//
+// The below operations are only valid for Pointer types
+//
+// - T* fetch_add(ptrdiff_t val, order)
+//   : Atomically performs a RMW that increments the value stored with sizeof(T) * val.
+//   : Returns the previous value stored before the RMW operation.
+// - T* fetch_sub(ptrdiff_t val, order)
+//   : Atomically performs a RMW that decrements the value stored with sizeof(T) * val.
+//   : Returns the previous value stored before the RMW operation.
+//
+// - +T* add_fetch(ptrdiff_t val, order)
+//   : Atomically performs a RMW that increments the value stored with sizeof(T) * val.
+//   : Returns the new updated value after the operation.
+// - +T* sub_fetch(ptrdiff_t val, order)
+//   : Atomically performs a RMW that decrements the value stored with sizeof(T) * val.
+//   : Returns the new updated value after the operation.
+//
+// - T* operator++/--()
+//   : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+//   : Returns the previous value stored before the RMW operation.
+//   : Memory is affected according to seq_cst ordering.
+//
+// - T* ++/--operator()
+//   : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+//   : Returns the new updated value after the RMW operation.
+//   : Memory is affected according to seq_cst ordering.
+//
+//
+// - +EASTL_ATOMIC_HAS_[len]BIT Macro Definitions
+//   These macros provide the ability to compile-time switch on the availability of support for the specific
+//   bit width of an atomic object.
+//   Example:
+//
+//   #if defined(EASTL_ATOMIC_HAS_128BIT)
+//   #endif
+//
+//   Indicates the support for 128-bit atomic operations on an eastl::atomic<T> object.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic_flag class API
+//
+// Unless otherwise specified, all orders except read_depends are valid
+// on the given operation.
+//
+// - atomic_flag() : Initializes the flag to false.
+//
+// - clear(order)
+//   : Atomically stores the value false to the flag.
+//   : Valid orders are relaxed, release, and seq_cst.
+//
+// - bool test_and_set(order)
+//   : Atomically exchanges flag with true and returns the previous value that was held.
+//
+// - bool test(order)
+//   : Atomically loads the flag value.
+//   : Valid orders are relaxed, acquire, and seq_cst.
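+//
+// As a quick usage sketch of the two class APIs above (a sketch only; the variable and
+// function names here are illustrative and not part of the library):
+//
+//     eastl::atomic<int> refCount{1};
+//
+//     void AddRef() { refCount.fetch_add(1, eastl::memory_order_relaxed); }
+//
+//     void Release()
+//     {
+//         // fetch_sub returns the value held before the RMW, so the last owner sees 1.
+//         if (refCount.fetch_sub(1, eastl::memory_order_acq_rel) == 1)
+//             Destroy(); // Destroy() is a hypothetical cleanup function.
+//     }
+//
+//     eastl::atomic_flag lock; // initialized to false
+//
+//     void Lock()   { while (lock.test_and_set(eastl::memory_order_acquire)) { eastl::cpu_pause(); } }
+//     void Unlock() { lock.clear(eastl::memory_order_release); }
+//
+// cpu_pause() is one of the standalone free functions documented below.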
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic standalone free function API
+//
+// All class methods have a standalone free function that takes a pointer to the
+// atomic object as the first argument. These functions just call the correct method
+// on the atomic object for the given operation.
+// These functions come in two variants, a non-explicit and an explicit variant
+// that take on the form atomic_op() and atomic_op_explicit() respectively.
+// The non-explicit variants take no order arguments and thus are all seq_cst.
+// The explicit variants take an order argument.
+// Only the standalone functions that do not have a class method equivalent pair will be
+// documented here, which includes all new extensions to the std API.
+//
+// - +compiler_barrier()
+//   : Read-Write Compiler Barrier.
+// - +compiler_barrier_data_dependency(const T&)
+//   : Read-Write Compiler Barrier.
+//   : Applies a fake input dependency on const T& so the compiler believes said variable is used.
+//   : Useful for example when writing benchmark or testing code with local variables that must not get dead-store eliminated.
+// - +cpu_pause()
+//   : Prevents speculative memory order violations in spin-wait loops.
+//   : Allows giving up core resources, execution units, to other threads while in spin-wait loops.
+// - atomic_thread_fence(order)
+//   : Read docs below.
+// - atomic_signal_fence(order)
+//   : Prevents reordering with a signal handler.
+// - +atomic_load_cond(const eastl::atomic<T>*, Predicate)
+//   : continuously loads the atomic object until Predicate is true
+//   : will properly ensure the spin-wait loop is optimal
+//   : very useful when needing to spin-wait for some condition to be true, which is common in many lock-free algorithms
+//   : Memory is affected according to seq_cst ordering.
+// - +atomic_load_cond_explicit(const eastl::atomic<T>*, Predicate, Order)
+//   : Same as above but takes an order for how memory is affected
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Deviations from the standard. This does not include new features added:
+//
+// 1.
+// Description: Atomics are always lock free
+// Reasoning  : We don't want people to fall into performance traps where implicit locking
+//              is done. If your user defined type is large enough to not support atomic
+//              instructions then your user code should do the locking.
+//
+// 2.
+// Description: Atomic objects cannot be volatile
+// Reasoning  : Volatile objects do not make sense in the context of eastl::atomic<T>.
+//              Use the given memory orders to get the ordering you need.
+//              Atomic objects have to become visible on the bus. See below for details.
+//
+// 3.
+// Description: Consume memory order is not supported
+// Reasoning  : See below for the reasoning.
+//
+// 4.
+// Description: ATOMIC_INIT() macros and the ATOMIC_LOCK_FREE macros are not implemented
+// Reasoning  : Use the is_lock_free() method instead of the macros.
+//              ATOMIC_INIT() macros aren't needed since the default constructor value initializes.
+//
+// 5.
+// Description: compare_exchange failure memory order cannot be stronger than success memory order
+// Reasoning  : Besides the argument that it ideologically does not make sense that a failure
+//              of the atomic operation shouldn't have a stricter ordering guarantee than the
+//              success of it; if that is required then just make the whole operation stronger.
+// This ability was added and allowed in C++17 only which makes supporting multiple +// C++ versions harder when using the compiler provided intrinsics since their behaviour +// is reliant on the C++ version being compiled. Also makes it harder to reason about code +// using these atomic ops since C++ versions vary the behaviour. We have also noticed +// that versions of compilers that say they support C++17 do not properly adhere to this +// new requirement in their intrinsics. Thus we will not support this. +// +// 6. +// Description: All memory orders are distinct types instead of enum values +// Reasoning : This will not affect how the API is used in user code. +// It allows us to statically assert on invalid memory orders since they are compile-time types +// instead of potentially runtime enum values. +// Allows for more efficient code gen without the use of switch statements or if-else conditionals +// on the memory order enum values on compilers that do not provide intrinsics that take in a +// memory order, such as MSVC, especially in debug and debug-opt builds. +// + + +///////////////////////////////////////////////////////////////////////////////// +// +// ******** DISCLAIMER ******** +// +// This documentation is not meant to provide rigorous proofs on the memory models +// of specific architectures or the C++ memory model introduced in C++11. It is not +// meant to provide formal mathematical definitions and logic that shows that a given +// implementation adheres to the C++ memory model. This isn't meant to be some infallible +// oracle on memory models, barriers, observers, and architecture implementation details. +// What I do hope a reader gets out of this is the following. An understanding of the C++ +// memory model and how that relates to implementations on various architectures. Various +// phenomena and ways that compilers and architectures can steer away from a sequentially +// consistent system. To provide examples on how to use this library with common patterns +// that will be seen in many code bases. Lastly I would like to provide insight and +// further readings into the lesser known topics that aren't shared outside people +// who live in this space and why certain things are done the way they are +// such as cumulativity of memory barriers as one example. Sometimes specifying barriers +// as LDLD/LDST/STST/STLD doesn't actually cut it, and finer grain semantics are needed +// to describe cumulativity of memory barriers. +// +// ******** Layout of the Documentation ******** +// +// This document will first go through a variety of different hardware architectures with examples of the various kinds of +// reordering that is allowed by these architectures. We will use the memory barriers provided by the hardware to "fix" these +// examples. +// Then we will introduce the C++ memory model and revisit the examples using the platform agnostic abstract memory model to "fix" +// them. +// The hope here is that we get a sense of the various types of architectures and weak memory consistency provided by them and thus +// an appreciation for the design of the C++ abstract memory model. 
+//
+// ******** REFERENCES ********
+// [1] Dekker's mutual exclusion algorithm made RW-safe
+// [2] Handling Memory Ordering in Multithreaded Applications with Oracle Solaris
+// [3] Evaluating the Cost of Atomic Operations on Modern Architectures
+// [4] A Tutorial Introduction to the ARM and POWER Relaxed Memory Models
+// [5] Memory Barriers: a Hardware View for Software Hackers
+// [6] Memory Model = Instruction Reordering + Store Atomicity
+// [7] ArMOR: Defending Against Memory Consistency Model Mismatches in Heterogeneous Architectures
+// [8] Weak Memory Models: Balancing Definitional Simplicity and Implementation Flexibility
+// [9] Repairing Sequential Consistency in C/C++11
+// [10] A high-level operational semantics for hardware weak memory models
+// [11] x86-TSO: A Rigorous and Usable Programmer's Model for x86 Multiprocessors
+// [12] Simplifying ARM Concurrency: Multicopy-Atomic Axiomatic and Operational Models for ARMv8
+// [13] Mixed-size Concurrency: ARM, POWER, C/C++11, and SC
+// [14] P0668R4: Revising the C++ memory model
+// [15] Constructing a Weak Memory Model
+// [16] The Superfluous Load Queue
+// [17] P0190R1: Proposal for New memory_order_consume Definition
+//
+// ******** What does it mean to be Atomic? ********
+//
+// The word atomic has been overloaded and can mean a lot of different things depending on the context,
+// so let's digest it.
+//
+// The first attribute for something to be atomic is that concurrent stores and loads
+// must not tear or shear. This means if two threads write 0x01 and 0x02 at the same time
+// then the only values that should ever be observed are 0x01 or 0x02. We can only see
+// the whole write of 0x01 or 0x02, not 0x03 as an example. Many algorithms rely on
+// this property; only a very few, such as Dekker's algorithm for mutual exclusion, don't.
+// Actually, a recent paper, [1], showed that even Dekker's isn't safe without atomic
+// loads and stores, so this property is pretty fundamental; it is also hard to prove that
+// your algorithm is safe without this property on loads and stores.
+//
+// We need to ensure the compiler emits a single load instruction.
+// If we are doing 64-bit loads on a 32-bit platform, we need to ensure the load is one
+// instruction instead of two 32-bit loads into two registers.
+// Another example is if we have this struct, struct { int32_t i; int32_t k; }, even on
+// a 64-bit system we have to ensure the compiler does one 64-bit load and not two
+// 32-bit loads for each individual member.
+//
+// We also need to ensure the correct instruction is emitted. A general load instruction
+// to do a 64-bit load on a 32-bit platform may perform a 64-bit load but it may not
+// be atomic; it may be turned into two 32-bit loads behind the scenes in the cpu.
+// For example on ARMv7 we would have to use ldrexd, not ldrd, for 64-bit loads
+// on a 32-bit ARMv7 core.
+//
+// An operation may be considered atomic if multiple sub-operations are done as one
+// transactional unit. This is commonly known as a Read-Modify-Write, RMW, operation.
+// Take a simple add operation; it is actually a load from memory into a register,
+// a modification of said register and then a store back to memory. If two threads
+// concurrently execute this add operation on the same memory location, any interleaving
+// of the 3 sub-operations is possible. It is possible that if the initial value is 0,
+// the result may be 1 because each thread executed in lockstep, both loading 0, adding 1
+// and then storing 1. A RMW operation may be considered atomic if the whole sequence of
+// sub-operations is serialized as one transactional unit.
+//
+// Atomicity may also refer to the order in which memory operations are observed and the
+// dependencies between memory operations to different memory locations. Here is a quick example
+// of the very thing we will be deep diving into, one that is not very intuitive. If I do
+// [STORE(A, 2); STORE(B, 1);] in one thread and another thread does [r0 = LOAD(B); r1 = LOAD(A);],
+// then if r0 == 1, meaning we observed the store to B, will we also observe r1 == 2? Our intuition
+// tells us that A was stored first and then B, so if I read the new value of B then I must also
+// read the new value of A, since the store to A happened before B; if I can see B then I must be
+// able to see everything before B, which includes A.
+// This highlights the ordering of memory operations and why memory barriers and memory
+// models are so heavily attached to atomic operations, because one could classify something
+// as atomic if the dependency highlighted in the above example is allowed to be maintained.
+//
+// This is what people mean when you hear that volatile does NOT mean atomicity of the operation.
+// Usually people imply a lot of implicit assumptions when they mark a variable as volatile.
+// All volatile gives us is the ability to tell the compiler it may not assume anything
+// about the state of that memory location. This means the compiler must always emit a load
+// or store instruction, cannot perform constant folding, dead-store elimination, or
+// do any sort of code movement on volatile variables.
+//
+// ******** Preliminary Basics ********
+//
+// It is expected that the reader understands what a cache is, how it is organized and how data
+// is chunked into cachelines. It is helpful if the reader understands basic cache coherency
+// protocols such as MSI or MESI.
+// It is expected the reader understands alignment, especially natural alignment
+// of the processor and why alignment is important for data access.
+// The reader should have some understanding of how a processor executes instructions,
+// basics of what Out-of-Order execution means and basics of what speculative execution means.
+// It is expected that the reader has an understanding of threading, multi-threaded programming
+// and the use of concurrency primitives such as mutexes.
+// Memory Barrier, Barrier, Memory Fence and Fence are all interchangeable synonyms.
+//
+// Independent memory operations can be performed or observed, depending on your perspective,
+// in any order as long as the local cpu thinks its execution is happening in program order.
+// This can be a problem for inter-cpu communications and thus we need some way to enforce
+// that the compiler does not reorder instructions and that the cpu also does not reorder
+// instructions. This is what a barrier is: an enforcement of ordering on memory instructions,
+// hence the name barrier. Barriers can be one-sided or both-sided, which means
+// the barrier enforces a partial order above or below or on both sides of said barrier.
+//
+// Processors will use tricks such as out-of-order execution, memory instruction buffering and
+// combining, speculative loads and speculative execution, branch prediction and many types of caching even
+// in various interconnects from the cpu to the memory itself. One key thing to note is that cpus
+// do not physically reorder the instruction stream. Instructions are dispatched and retired
+// in-order but executed out-of-order. Memory barriers will prevent these tricks from happening
+// by controlling the interaction of multiple cpus.
+//
+// Compilers will morph your code and physically move instructions around as long as the program
+// has the same observed behaviour. This is becoming increasingly true with optimization techniques
+// such as Link Time Optimization becoming the norm: where people once assumed a compiler couldn't
+// see outside the given TU, it now has a whole-program view and knows everything.
+// This means the compiler does indeed alter the instruction stream,
+// and compiler barriers are a way to tell it not to move any memory instructions across the barrier.
+// This does not prevent a compiler from doing optimizations such as constant folding, merging of
+// overlapping loads, or even dead store elimination. Compiler barriers are also very cheap and
+// have zero impact on anything that the compiler knows isn't visible in memory, such as local variables
+// whose addresses do not escape the function even if their address is taken. You can think of it
+// in terms of a sequence point as used with "volatile" qualified variables to denote a place in code where
+// things must be stable and the compiler doesn't cache any variables in registers or do any reordering.
+//
+// Memory Barriers come in many flavours that instill a partial or full ordering on memory operations.
+// Some memory operations themselves have implicit ordering guarantees already; for example,
+// Total-Store Order, TSO, architectures like x86 guarantee that a store operation cannot be reordered with a
+// previous store operation, thus a memory barrier that only orders stores is not needed
+// on this architecture other than ensuring the compiler doesn't do any shenanigans.
+// Considering we have 4 permutations of memory operations, a common way to describe an ordering
+// is via Load-Load/LDLD, Load-Store/LDST, Store-Store/STST or Store-Load/STLD notation. You read this
+// notation as follows: a STLD memory barrier means a load cannot be reordered with a previous store.
+// For example, on a TSO architecture we can say all stores provide a STST memory barrier,
+// since a store cannot be reordered with a previous store.
+//
+// Memory barriers are not in themselves a magic bullet; they come with caveats that must be known.
+// Each cpu architecture also has its own flavours and guarantees provided by said memory barriers.
+// There is no guarantee that memory instructions specified before a memory barrier will complete,
+// be written to memory or be fully propagated throughout the rest of the system, when the memory barrier
+// instruction completes. The memory barrier creates a point in the local cpu's queue of memory instructions
+// that they must not cross. There is no guarantee that using a memory barrier on one cpu will have
+// any effect at all on another remote cpu's observed view of memory. This also implies that executing
+// a memory barrier does not stall, hinder or force any other cpu to serialize with it.
+// In order for a remote cpu to observe the correct effects it must also use a matching memory barrier.
+// This means code communicating between 2 threads through memory must employ memory barriers on both sides.
+// For example, a store memory barrier that only orders stores, STST, in one thread must be paired with a load memory barrier
+// that only orders loads, LDLD, in the other thread trying to observe those stores in the correct order.
+//
+// ******** Memory Types && Devices ********
+//
+// eastl::atomic<T> and the accompanying memory barriers ONLY ORDER MEMORY for cpu-to-cpu communication through whatever the
+// processor designates as normal cacheable memory. It does not order memory to devices. It does not provide any DMA ordering guarantees.
+// It does not order memory with other memory types such as Write Combining. It strictly orders memory only to shared memory that is used
+// to communicate between cpus only.
+//
+// ******** Sequentially Consistent Machine ********
+//
+// The most intuitive model, as well as the model people naturally expect a concurrent system to have, is Sequential Consistency.
+// You may have heard this term if you have dealt with any type of distributed system. Lamport's definition
+// articulates this consistency model the best.
+// Leslie Lamport: "the result of any execution is the same as if the operations of all the processors were executed in some
+//                  sequential order, and the operations of each individual processor appear in this sequence in the order
+//                  specified by its program".
+//
+// A Sequentially Consistent machine is modelled as follows:
+//
+// ------------           ------------
+// | Thread 0 |    ...    | Thread N |
+// ------------           ------------
+//     |  |                   |  |
+//     |  |                   |  |
+// ----------------------------------------
+// |                                      |
+// |            Shared Memory             |
+// |                                      |
+// ----------------------------------------
+//
+// This is a sequentially consistent machine. Each thread is executing instructions in program order, doing loads and stores
+// that are serialized in some order to the shared memory. This means all communication is done through the shared memory, with one cpu
+// doing one access at a time. This system has a couple of key properties.
+//
+// 1. There is no local cpu memory reordering. Each cpu executes instructions in program order and all loads and stores must complete,
+//    be visible in the shared memory or be visible in a register, before starting the next instruction.
+// 2. Each memory operation becomes visible to all cpus at the same time. If a store hits the shared memory, then all subsequent loads
+//    from every other cpu will always see the latest store.
+//
+// A Sequentially Consistent machine has Single-Copy Store Atomicity: all stores must become visible to all cores in the system at the same time.
+//
+// ******** Adding Caches ********
+//
+// Caches by nature implicitly add the potential for memory reordering. The centralized shared snoopy bus that we all learned about in school
+// makes it easy to implement sequential consistency with caches: writes and reads are all serialized in a total order via the cache bus transaction
+// ordering. But every modern day bus is not in-order, and most certainly not a shared centralized bus. Cache coherency guarantees that all memory operations
+// will be propagated eventually to all parties, but it doesn't guarantee in what order or in what time frame. Once you add
+// caches, various levels of caching and various interconnects between remote cpus, you inevitably run into the issue where
+// some cpus observe the effects of a store before other cpus. Obviously we have both weakly-ordered and strongly-ordered cpus with
+// caches, so why is that? The short answer is a question of where the onus is put: on the programmer or on the hardware. Does the hardware
+// have dependency tracking; is it able to determine when a memory order violation occurs, for example by rolling back its speculative execution;
+// and how far along the chain of interconnects does the hardware wait before it determines that the memory operation has
+// been acknowledged or is considered to satisfy its memory ordering guarantees? Again this is a very high level view of the system
+// as a whole, but the takeaway is yes, caches do add the potential for reordering, but other supporting hardware determines whether
+// that is observable by the programmer. There is also some debate whether weakly-ordered processors are actually more performant
+// than strongly-ordered cpus, alluding to the fact that the hardware has a better picture of what is a violation, versus the programmer
+// having to emit far more barriers on weakly-ordered architectures in multi-threaded code, barriers which may actually not be needed
+// because the hardware didn't commit a violation; but it may have, and we as programmers cannot rely on may-haves.
+//
+// ******** Store Buffers ********
+//
+// Obviously having all stores serialize results in unnecessary stalls. Store buffers alleviate this issue.
+// Store buffers are simple fixed size structures that sit between the cpu and the memory hierarchy. This allows
+// each cpu to record its write in the store buffer and then move onto the next instruction. The store buffer will
+// eventually be flushed to the resulting memory hierarchy in FIFO order. How and when this flushing occurs is irrelevant to the
+// understanding of a store buffer. A read from an address will grab the most recent write to the same address in the store buffer.
+//
+// The introduction of a store buffer is our first dive into weaker memory consistency. The addition of this hardware turns the consistency model weaker,
+// into one that is commonly known as TSO, Total-Store Order. This is the exact model used by x86 cpus, and we will see what this means
+// and what new effects are observed with the addition of the store buffer. Below is a diagram of how the machine may now look.
+// This type of store buffer is known as a FIFO store buffer, FIFO write buffer, or Load/Store Queue in some literature. This type of
+// store buffer introduces STLD reordering but still prevents STST reordering. We will take a look at another type of store buffer later.
+// Even with this store buffer, stores to the same address can still be merged so that only the latest store is written to the cache, assuming
+// no other intermediary stores happen. x86 cpus do write merging even for consecutive stores, i.e. storing to A and A+1 can be merged into one two-byte store.
+//
+// ------------           ------------
+// | Thread 0 |    ...    | Thread N |
+// ------------           ------------
+//     |  |                   |  |
+//     |  |                   |  |
+//  | Store  |            | Store  |
+//  | Buffer |            | Buffer |
+//     |  |                   |  |
+// ----------------------------------------
+// |                                      |
+// |            Shared Memory             |
+// |                                      |
+// ----------------------------------------
+//
+// ---- Store-Buffering / Dekker's Example ----
+// This is a very common litmus test that showcases the introduction of STLD reordering. It is called the Store-Buffering example because it is the only weaker
+// behaviour observed under TSO, and also called Dekker's Example as it famously breaks Dekker's mutual exclusion algorithm.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0     | Thread 1
+// ---------------------------
+// STORE(x, 1)  | STORE(y, 1)
+// r0 = LOAD(y) | r1 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 0 && r1 = 0
+// ---------------------------
+//
+// We would normally assume that any interleaving of the two threads cannot possibly end up with both loads reading 0. We assume the observed outcome
+// of r0 = 0 && r1 = 0 to be impossible; clearly that is not the case. Let's start by understanding the example with no reordering possible. Both threads
+// run and their first instruction is to write the value 1 into either x or y; the next instruction then loads from the opposite variable. This means no
+// matter the interleaving, one of the loads always executes after the other thread's store to that variable.
+// We could observe r0 = 1 && r1 = 1 if both threads execute in lockstep.
+// We could observe r0 = 0 && r1 = 1 if thread 0 executes and then thread 1 executes.
+// We could observe r0 = 1 && r1 = 0 if thread 1 executes and then thread 0 executes.
+// Since the stores always execute before the load in the other thread, one thread must always observe at least one store, so let's see why store buffers break this.
+//
+// What will happen is that STORE(x, 1) is stored to the store buffer but not made globally visible yet.
+// STORE(y, 1) is written to the store buffer and also is not made globally visible yet.
+// Both loads now read the initial state of x and y, which is 0. We got the r0 = 0 && r1 = 0 outcome and just observed a Store-Load reordering.
+// It has appeared as if the loads have been reordered with the previous stores and thus executed before the stores.
+// Notice that even if we execute the instructions in order, a series of other hardware side effects made it appear as if the instructions have been reordered.
+// We can solve this by placing a Store-Load barrier after the store and before the load as follows.
+//
+// ---------------------------
+// Thread 0     | Thread 1
+// ---------------------------
+// STORE(x, 1)  | STORE(y, 1)
+// STLD BARRIER | STLD BARRIER
+// r0 = LOAD(y) | r1 = LOAD(x)
+// ---------------------------
+//
+// This STLD barrier effectively flushes the store buffer into the memory hierarchy, ensuring all stores in the buffer are visible to all other cpus at the same time
+// before executing the load instruction. Again, nothing prevents the hardware from speculatively executing the load even with the STLD barrier; the hardware will have to do
+// a proper rollback if it detects a memory order violation, otherwise it can continue on with its speculative load. The barrier just delimits a stability point.
+//
+// Most hardware does not provide granular barrier semantics such as STLD. Most provide a write memory barrier which only orders stores, STST, a read memory barrier
+// which only orders loads, LDLD, and then a full memory barrier which is all 4 permutations. So on x86 we will have to use the mfence, memory fence, instruction
+// which is a full memory barrier to get our desired STLD requirements.
+//
+// TSO also has a property that we call Multi-Copy Store Atomicity: a cpu sees its own stores before they become visible to other cpus,
+// by forwarding them from the store buffer, but a store becomes visible to all other cpus at the same time when flushed from the store buffer.
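+//
+// In terms of this library, the STLD barrier in the fixed example above is what the default
+// seq_cst ordering provides. A minimal sketch of the store-buffering litmus test on
+// eastl::atomic<T> (variable names are illustrative):
+//
+//     eastl::atomic<int> x{0};
+//     eastl::atomic<int> y{0};
+//
+//     // Thread 0                                     // Thread 1
+//     x.store(1, eastl::memory_order_seq_cst);        y.store(1, eastl::memory_order_seq_cst);
+//     int r0 = y.load(eastl::memory_order_seq_cst);   int r1 = x.load(eastl::memory_order_seq_cst);
+//
+// With seq_cst on both the stores and the loads, the r0 == 0 && r1 == 0 outcome is forbidden.
+// Note that a release store paired with an acquire load would NOT be enough here; that pairing
+// still permits the STLD reordering this example trips over, which is why Dekker-style
+// algorithms need seq_cst.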
+// +// +// Let's look at a non-FIFO store buffer now as seen in ARM cpus as an example and we will use a standard Message Passing example to see how it manifests in even weaker consistency. +// A store buffer on ARM as an example allows write merging even with adjacent stores, is not a FIFO queue, any stores in the small hardware hash table may be ejected at any point +// due to a collision eviction or the availability of cachelines in the cache hierarchy meaning that stores may bypass the buffer entirely if that cacheline is already owned by that cpu. +// There is no guarantee that stores will be completed in order as in the FIFO case. +// +// --------------------------- +// Initial State: +// x = 0; y = 0; +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// Observed: r0 = 0 +// --------------------------- +// +// This is a classic Message Passing example that is very commonly used in production code. We store some values and then set a flag, STORE(y, 1) in this case. +// The other thread waits until the flag is observed and then reads the value out of x. If we observed the flag then we should obviously see all stores before the flag was set. +// Given our familiarity with TSO consistency above we know this definitely works on TSO and it is impossible to observe the load of x returning 0 under that consistency model. +// Let's see how this breaks with a non-FIFO store buffer. +// +// Thread 0 executes the STORE(x, 1) but the cacheline for x is not in thread 0's cache so we write to the store buffer and wait for the cacheline. +// Thread 1 executes the LOAD(y) and it also does not have y in its cacheline so it waits before completing the load. +// Thread 0 moves on to STORE(y, 1). It owns this cacheline, hypothetically, so it may bypass the store buffer and store directly to the cache. +// Thread 0 receives a message that Thread 1 needs y's cacheline, so it transfers the now modified cacheline to Thread 1. +// Thread 1 completes the load with the updated value of y = 1 and branches out of the while loop since we saw the new value of y. +// Thread 1 executes LOAD(x) which will return 0 since Thread 0 still hasn't flushed its store buffer waiting for x's cacheline. +// Thread 0 receives x's cacheline and now flushes x = 1 to the cache. Thread 1 will also have invalidated its cacheline for x that it brought in via the previous load. +// +// We have now fallen victim to STST reordering, allowing Thread 1 to observe a load of x returning 0. Not only does this store buffer allow STLD reordering due to the nature of +// buffering stores, but it also allows another reordering; that of Store-Store reordering. It was observed as if Thread 0 executed STORE(y, 1) before STORE(x, 1) which completely +// broke our simple message passing scenario. +// +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// +// The STST memory barrier effectively ensures that the cpu will flush its store buffer before executing any subsequent stores. That is not entirely true, the cpu is still allowed +// to continue and execute stores to the store buffer as long as it doesn't flush them to the cache before the previous stores are flushed to the cache. If nothing becomes +// globally visible out of order then we are good. 
+// The example above will change how the processor executes due to the STST memory barrier. Thread 0 will execute STORE(y, 1), write to the store buffer and mark all current entries. Even though it owns the cacheline +// it cannot write the store to the cache until all marked entries, which are all the previous stores, are flushed to the cache. We have now fixed the message passing code by adding +// a STST or write memory barrier and thus it is no longer possible to observe the load of x returning 0. +// +// ******** Invalidation Queues ******** +// +// Due to the cache coherency protocol in play, a write to a cacheline will have to send invalidation messages to all other cpus that may have that cacheline as well. +// Immediately executing and responding to invalidation messages can cause quite a stall especially if the cache is busy at the moment with other requests. +// The longer we wait to invalidate the cacheline, the longer the remote cpu doing the write is stalled waiting on us. We don't like this very much. +// Invalidation Queues are just that, we queue up the action of actually invalidating the cacheline but immediately respond to the request saying we did it anyway. +// Now the remote cpu thinks we invalidated said cacheline but actually it may very well still be in our cache ready to be read from. We just got weaker again, let's +// see how this manifests in code by starting from the end of our previous example. +// +// --------------------------- +// Initial State: +// x = 0; y = 0; +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// Observed: r0 = 0 +// --------------------------- +// +// Thread 1 receives the invalidate x's cacheline message and queues it because it is busy. +// Thread 1 receives the invalidate y's cacheline message, but we don't have that cacheline so acknowledge immediately. +// Thread 1 executes LOAD(y), loads in y's cacheline and branches out of the loop. +// Thread 1 executes LOAD(x), and loads from the cache the old value of x because the invalidation message is still sitting in the invalidation queue. +// +// We have just again observed the load of x returning 0 but from a different type of reordering now on the reader side. +// This is a form of LDLD, Load-Load, reordering as it appears as if LOAD(x) was executed before LOAD(y). This can be fixed as follows. +// +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | LDLD BARRIER +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// +// The LDLD memory barrier essentially marks all entries currently in the invalidation queue. Any subsequent load must wait until all the marked entries have been +// processed. This ensures once we observe y = 1, we process all entries that came before y and that way we observe all the stores that happened before y. +// The insertion of the read memory barrier creates the required memory barrier pairing as discussed above and ensures that now our code executes as expected. +// +// It must be made clear that these are not the only hardware structure additions or ways that can relax STST, STLD and LDLD orderings. These are merely +// 2 structures that are common and ones that I choose to use as examples of how hardware can reduce ordering guarantees. 
Knowing how the hardware does this
+// isn't always entirely clear, but having a model that tells us what operations can be reordered is all we need to be able to reason about our code when executing on that hardware.
+//
+// ******** Load Buffering ********
+//
+// The analog of the Store-Buffering example, this litmus test has two threads read from two different locations and then write to the other location.
+// The outcome below is allowed because LDST reordering is observable on many processors such as ARM.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0     | Thread 1
+// ---------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// STORE(y, 1)  | STORE(x, 1)
+// ---------------------------
+// Observed: r0 = 1 && r1 = 1
+// ---------------------------
+//
+// This is possible because the processor does not have to wait for the other cpu's cacheline to arrive before storing into the cache.
+// Assume Thread 0 owns y's cacheline and Thread 1 owns x's cacheline.
+// The processor may execute the load and thus buffer the load waiting for the cacheline to arrive.
+// The processor may continue on to the store and, since each cpu owns its respective cacheline, store the result into the cache.
+// The cpus now receive the cachelines for x and y with the now modified values.
+// We have just observed the loads returning 1 and thus observed LDST reordering.
+//
+// To forbid such an outcome it suffices to add any full memory barrier to both threads, or a local Read-After-Write/Read-To-Write dependency, or a control dependency.
+//
+// -------------------------------
+// Thread 0        | Thread 1
+// -------------------------------
+// r0 = LOAD(x)    | r1 = LOAD(y)
+// if (r0 == 1)    | if (r1 == 1)
+//     STORE(y, 1) |     STORE(x, 1)
+// -------------------------------
+//
+// -----------------------------------------------------
+// Thread 0                  | Thread 1
+// -----------------------------------------------------
+// r0 = LOAD(x)              | r1 = LOAD(y)
+// STORE(&(y + r0 - r0), 1)  | STORE(&(x + r1 - r1), 1)
+// -----------------------------------------------------
+//
+// Both fixes above ensure that neither write can be committed, made globally visible, until the read preceding it in program order has been fully satisfied.
+//
+// ******** Compiler Barriers ********
+//
+// Compiler barriers are both-sided barriers that prevent loads and stores from moving down past the compiler barrier and
+// loads and stores from moving up above the compiler barrier. Here we will see the various ways our code may be subject
+// to compiler optimizations and why compiler barriers are needed. Note, as stated above, compiler barriers may not
+// prevent all compiler optimizations or transformations. Compiler barriers are usually implemented by forcing the compiler to reload all
+// variables that are currently cached in registers and to flush all stores held in registers back to memory.
+// This list isn't exhaustive but hopefully outlines what compiler barriers protect against and what they don't.
+//
+// The compiler may reorder loads.
+// LOAD A; LOAD B; -> LOAD B; LOAD A;
+// LOAD A; operation on A; LOAD B; operation on B; -> LOAD A; LOAD B; operation on A; operation on B;
+//
+// Insert a compiler barrier in between the two loads to guarantee that they are kept in order.
+// LOAD A; COMPILER_BARRIER; LOAD B;
+// LOAD A; operation on A; COMPILER_BARRIER; LOAD B; operation on B;
+//
+// The same goes for stores.
+//
+// STORE(A, 1); STORE(B, 1); -> STORE(B, 1); STORE(A, 1);
+// operations and STORE result into A; operations and STORE result into B; -> all operations; STORE result into B; STORE result into A;
+//
+// Insert a compiler barrier in between the two stores to guarantee that they are kept in order.
+// Note that the barrier does not require that multiple stores to A before the barrier stay separate; they may still be merged into one final store.
+// Nor does it require that the store to B after the barrier be written to memory; it may be cached in a register for some indeterminate
+// amount of time, as an example.
+// STORE(A, 1); COMPILER_BARRIER; STORE(B, 1);
+//
+// The compiler is allowed to merge overlapping loads and stores.
+// Inserting a compiler barrier here will not prevent the compiler from doing this optimization, as doing one wider load/store is
+// technically still abiding by the guarantee that the loads/stores are not reordered with each other.
+// LOAD A[0]; LOAD A[1]; -> A single wider LOAD instruction
+// STORE(A[0], 1); STORE(A[1], 2); -> A single wider STORE instruction
+//
+// Compilers do not have to reload the values that pointers point to. This is especially common on RISC architectures with lots
+// of general purpose registers, or with compiler optimizations such as inlining or Link-Time Optimization.
+// int i = *ptr; Do bunch of operations; if (*ptr) { do more; }
+// It is entirely possible the compiler may remove the last if statement because it can keep *ptr in a register
+// and it may infer from the operations done on i that i is never 0.
+//
+// int i = *ptr; Do bunch of operations; COMPILER_BARRIER; if (*ptr) { do more; }
+// Inserting a compiler barrier at that location will force the compiler to reload *ptr, thus keeping the if statement, assuming
+// no other optimizations take place, such as the compiler proving that *ptr is always greater than 0.
+//
+// The compiler is also within its rights to merge and reload loads as much as it pleases.
+//
+// while (int tmp = LOAD(A))
+//     process_tmp(tmp)
+//
+// May be merged and transformed into
+//
+// if (int tmp = LOAD(A))
+//     for (;;) process_tmp(tmp)
+//
+// Inserting a compiler barrier will ensure that LOAD(A) is always reloaded and thus the unwanted transformation is avoided.
+//
+// while (int tmp = LOAD(A))
+// {
+//     process_tmp(tmp)
+//     COMPILER_BARRIER
+// }
+//
+// Under heavy register pressure scenarios, say the loop body was larger, the compiler may reload A as follows.
+// Compiler barriers cannot prevent this from happening, even if we put one after process_tmp as above;
+// the compiler still kept those loads above the barrier, so it satisfied its contract even though it reloaded
+// from A more than once.
+//
+// while (int tmp = LOAD(A))
+//     process_tmp(LOAD(A))
+//
+// In the above transformation it is possible that another cpu stores 0 into A. When we reload A for process_tmp, we pass 0
+// to process_tmp(), which it would never expect to observe, because if we had observed 0 the while loop condition
+// would never have been satisfied. If the compiler under register pressure instead stored and loaded tmp from its stack slot, that would be fine
+// because we would just be storing and loading the originally observed value of A. Obviously that is slower than just reloading from
+// A again, so an optimizing compiler may not do the stack slot store. This is an unwanted transformation which eastl::atomic prevents
+// even on relaxed loads.
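+//
+// As a sketch of what such a barrier commonly looks like - an assumption about typical toolchains, not something defined in
+// this file - gcc and clang style compilers accept an empty asm statement with a memory clobber, and msvc has historically
+// provided the (now deprecated) _ReadWriteBarrier() intrinsic. The COMPILER_BARRIER macro name is illustrative.
+//
+// #if defined(_MSC_VER)
+//     #include <intrin.h>
+//     #define COMPILER_BARRIER() _ReadWriteBarrier()            // compiler-only barrier; emits no instructions
+// #else
+//     #define COMPILER_BARRIER() asm volatile("" ::: "memory")  // empty asm with a memory clobber
+// #endif
+//
+// while (int tmp = LOAD(A))
+// {
+//     process_tmp(tmp);
+//     COMPILER_BARRIER(); // the memory clobber forces the compiler to assume memory changed, so A is reloaded
+// }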
+//
+// The compiler is allowed to do dead-store elimination if it knows that the value has already been stored, or that only the last store
+// needs to survive. The compiler does not assume or know that these variables are shared variables.
+//
+// STORE(A, 1);        STORE(A, 1);
+// OPERATIONS;      -> OPERATIONS;
+// STORE(A, 1);
+//
+// The compiler is well within its rights to omit the second store to A. If we are doing some fancy lockfree communication
+// with another cpu and the last store is meant to ensure the ending value is 1 even if another cpu changed A in between, that
+// assumption will not be satisfied. A compiler barrier will not prevent the last store from being dead-store eliminated.
+//
+// STORE(A, 1);
+// OPERATIONS;
+// STORE(A, 2);
+//
+// Assume these stores are meant to denote state changes that communicate with a remote cpu. Without a compiler barrier the compiler is allowed to
+// transform this as follows; insert a compiler barrier between the two stores to prevent the transformation.
+// Something like this will also require memory barriers, but that is not the point of this section.
+//
+// STORE(A, 2);
+// OPERATIONS;
+//
+// The compiler is also allowed to invent stores as it pleases.
+// First, on many RISC architectures storing an immediate value either involves loading the immediate from the .data section,
+// or combining a load-upper-immediate with add-immediate or or-immediate instructions to get our constant into a register and then
+// doing a single 32-bit store instruction from said register. Some ISAs also have 16-bit stores of immediate values, so that the store
+// may instead be broken into two 16-bit immediate stores, causing shearing. To reduce instruction dependencies it may also decide
+// to do two or-immediates and then two 16-bit stores, again causing shearing.
+//
+// lui $t0, 1          # t0 == 0x00010000
+// ori $t0, $t0, 8     # t0 == 0x00010008
+// strw $t0, 0($a1)    # store the full 32-bit value in t0 to the address in a1
+// ->
+// ori $t0, $zero, 8   # t0 == 0x00000008, the lower half
+// ori $t1, $zero, 1   # t1 == 0x00000001, the upper half
+// strhw $t0, 0($a1)   # store t0 as the lower 16-bit half at a1
+// strhw $t1, 2($a1)   # store t1 as the upper 16-bit half at a1 + 2
+//
+// The above shows a potential transformation that a compiler barrier cannot solve for us.
+//
+// A compiler may also introduce stores to save on branching. Let's see.
+//
+// if (a)
+//     STORE(X, 10);
+// else
+//     STORE(X, 20);
+// ->
+// STORE(X, 20);
+// if (a)
+//     STORE(X, 10);
+//
+// This is a very common optimization as it saves a potentially more expensive branch instruction, but it breaks multi-threaded code.
+// This is also another case where a compiler barrier doesn't give us the granularity we need.
+// The branches may even be completely removed with the compiler instead choosing to use conditional move operations, which would
+// actually be compliant since only one store would be done; no extra store would have been invented.
+//
+// You are now probably thinking that compiler barriers are useful and are definitely needed to tell the compiler to calm down
+// and to ensure our hardware guarantees hold because the code we wrote is the code that was emitted.
+// But as shown there are lots of caveats where compiler barriers do not provide the guarantees we still need.
+// This is where eastl::atomic comes into play, and under the relaxed memory ordering section it will be explained
+// what the standard guarantees and how we achieve those guarantees, like ensuring the compiler never does dead-store elimination or reloads.
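+//
+// A minimal sketch of the difference this makes, using illustrative names; the guarantees relied on here are the coherence
+// rules described in the Relaxed section below:
+//
+// int plainFlag;                  // the compiler may merge, elide, or invent accesses to this
+// eastl::atomic<int> atomicFlag;  // every operation below must be emitted, even with relaxed ordering
+//
+// void signalTwice()
+// {
+//     plainFlag = 1;                               // may be dead-store eliminated
+//     atomicFlag.store(1, memory_order_relaxed);   // must be emitted
+//     // ... operations ...
+//     plainFlag = 1;                               // may be removed entirely
+//     atomicFlag.store(1, memory_order_relaxed);   // must also be emitted
+// }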
+//
+// ******** Control Dependencies ********
+//
+// Control dependencies are implicit local cpu ordering of memory instructions due to branching instructions, specifically
+// only conditional branches. The problem is that compilers do not understand control dependencies, and control dependencies
+// are incredibly hard to understand. This section is meant to make the reader aware they exist and to never use them,
+// because they shouldn't be needed at all with eastl::atomic. Also, control dependencies are categorized as LDLD or LDST;
+// store control dependencies inherently do not make sense since a conditional branch loads and compares two values.
+//
+// A LDLD control dependency is an anti-pattern since it is not guaranteed that any architecture will detect the memory-order violation.
+// r0 = LOAD(A);
+// if (r0)
+//     r1 = LOAD(B)
+//
+// Given that sequence of instructions, it is entirely possible that a cpu attempts to speculatively predict the branch and load the value of B
+// before the branch instruction has finished executing. The cpu is entirely allowed to load from B before A, assuming B is in cache and A
+// is not. And even if the cpu was correct in its prediction, it is allowed to keep the speculatively loaded B rather than reload it
+// and change the fact that it speculatively got lucky.
+//
+// This is also what the x86 pause instruction inserted into spin wait loops is meant to solve.
+// LOOP:
+// r0 = LOAD(A);
+// if (!r0) pause; goto LOOP;
+//
+// In the above spin loop, after a couple of iterations the processor will fill the pipeline with speculated cmp and load instructions.
+// x86 will catch a memory order violation if it sees that an external store was done to A, and it must then flush the entire
+// pipeline of all the speculated loads of A. The pause instruction tells the cpu not to issue those speculative loads so that the pipeline is not
+// filled with said speculative load instructions. This ensures we do not incur the costly pipeline flushes from memory order
+// violations, which are likely to occur in tight spin wait loops. It also allows other threads on the same physical core to use the
+// core's resources better since our speculation won't be hogging them all.
+//
+// A LDST control dependency is a true dependency in which the cpu cannot make a store visible to the system and other cpus until it
+// knows its prediction is correct. Thus LDST ordering is guaranteed and can always be relied upon, as in the following example.
+//
+// r0 = LOAD(A);
+// if (r0)
+//     STORE(B, 1);
+//
+// The fun part is how the compiler actually breaks all of this.
+// First, if the compiler can ensure that the value of A in the LDST example is never zero, then it is always within its
+// rights to completely remove the if statement, which would leave us with no control dependency.
+//
+// Things get more fun when we deal with conditionals with else and else if statements, where the compiler might be able to employ
+// invariant code motion optimizations. Take this example.
+//
+// r0 = LOAD(A);
+// r1 = LOAD(B);
+// if (r0)
+//     STORE(B, 1);
+//     /* MORE CODE */
+// else if (r1)
+//     STORE(B, 1);
+//     /* MORE CODE */
+// else
+//     STORE(B, 1);
+//     /* MORE CODE */
+//
+// If we were trying to be smart and entirely rely on the control dependency to ensure order: just don't, the compiler
+// is always smarter. The compiler is well within its rights to hoist the STORE(B, 1) up and above all the conditionals, breaking
+// our reliance on the LDST control dependency.
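+//
+// A hedged sketch of the spin-wait idiom discussed above, assuming an x86 toolchain; gSpinFlag is an illustrative name, and
+// _mm_pause is the usual intrinsic for the pause instruction on msvc/gcc/clang. The acquire ordering is explained in the
+// Release-Acquire section further below.
+//
+// #include <immintrin.h> // _mm_pause
+//
+// eastl::atomic<int> gSpinFlag{0};
+//
+// void spinWait()
+// {
+//     while (gSpinFlag.load(memory_order_acquire) == 0)
+//     {
+//         _mm_pause(); // emits pause: throttles speculative loads, avoiding pipeline flushes and freeing SMT resources
+//     }
+// }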
+//
+// Things can get even more complicated, especially in C++, when values may come from constexpr, inline, inline constexpr, static const, etc.
+// variables, and thus the compiler will do all sorts of transformations to reduce, remove, augment and change all your conditional code since
+// it knows the values of the expressions, or even parts of them, at compile time. Even more aggressive optimizations like LTO might break code that was being cautious.
+// Even adding simple short-circuiting logic or your classic likely/unlikely macros can alter conditionals in ways you didn't expect.
+// In short: know enough about control dependencies to know never to use them.
+//
+// ******** Multi-Copy Store Atomicity && Barrier Cumulativity ********
+//
+// Single-Copy Store Atomicity : All stores must become visible to all cores in the system at the same time.
+//
+// Multi-Copy Store Atomicity  : A cpu sees its own stores before they become visible to other cpus, by forwarding them from the store buffer,
+//                               but a store becomes visible to all other cpus at the same time when flushed from the store buffer.
+//
+// Non-Atomic Store Atomicity  : A store becomes visible to different cpus at different times.
+//
+// Those are the variations of Store Atomicity. Most processors have Non-Atomic Store Atomicity and thus you must program to that lowest common denominator.
+// We can use barriers, with some caveats, to restore Multi-Copy Store Atomicity to a Non-Atomic system, though we need a new, more granular definition for
+// memory barriers to describe this behaviour; the simple LDLD/LDST/STST/STLD definition is not enough to categorize memory barriers at this level. Let's start off
+// with a simple example that breaks under a Non-Atomic Store Atomicity system and see what potential hardware features allow this behaviour to be observed.
+//
+// NOTE: For all the below examples we assume no compiler reordering, and that the processor also executes the instructions with no local reorderings, to make the examples simpler and
+// to only show off the effects of Multi-Copy Store Atomicity. This is why we don't add any address dependencies or mark explicit LDLD/LDST memory barriers.
+// Thus you may assume all LDLD and LDST pairs have an address dependency between them, so that they are not reordered by the compiler or the local cpu.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Write-To-Read Causality, WRC, Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Initial State:
+// X = 0; Y = 0;
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0     | Thread 1      | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1)  | r0 = LOAD(X)  | r1 = LOAD(Y)
+//              | STORE(Y, r0)  | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0
+// ---------------------------------------------------------------------------------------------------------
+//
+// Let's go over this example in detail and ask whether the outcome shown above can be observed. In this example Thread 0 stores 1 into X. If Thread 1 observes the write to X,
+// it stores the observed value into Y. Thread 2 loads from Y then X.
This means that if the load from Y returns 1, we intuitively know the global store order
+// was 1 to X and then 1 to Y. So is it possible then that the load from X in Thread 2 can return 0 in that case? Under a Multi-Copy Store Atomicity system that would be
+// impossible: once 1 is stored to X, all cpus see that store, so if Thread 2 saw the store to Y, which can only happen after the store to X was observed, then
+// Thread 2 must also have observed the store to X and return 1. As you may well have figured out, under a Non-Atomic Store Atomicity system it is still possible to
+// observe the load from X returning 0 even though the load from Y returned 1 in Thread 2. This completely breaks our intuition of causality. Let's now understand what hardware may cause this.
+//
+// This is possible on cpus that have Simultaneous Multi-Threading, SMT, or HyperThreading in Intel parlance, where logical cores share resources such as the store buffer or L1 cache.
+// We are accustomed to the x86 way of SMT, where each logical core shares the Execution Units of the physical core but each logical core has its own statically partitioned
+// cache and store buffer that is not visible to the other cpus. On cpus like ARMv7 or POWER - POWER9 supports 4 and even 8 threads per physical core - it is common,
+// to save on die space while still enabling this large number of threads per physical core, for the logical cores to all share the same store buffer or L1 cache
+// of their physical core. Let's take the above example and rerun it with this knowledge to get the observed behaviour outlined above.
+//
+// Assume Thread 0, Thread 1, and Thread 2 run on cpu 0, cpu 1, and cpu 2 respectively. Assume that cpu 0 and cpu 1 are two logical cores on the same physical core, so this processor
+// has an SMT value of 2. Thread 0 stores 1 into X. This store may be in the store buffer or in the L1 cache that cpu 1 shares with cpu 0; thus cpu 1 has early access to cpu 0's stores.
+// Thread 1 loads X, observes the 1 early, and then stores 1 into Y. Thread 2 may now see the load from Y returning 1 but the load from X returning 0, all because cpu 1 got early
+// access to cpu 0's store due to sharing an L1 cache or store buffer.
+// We will come back to how to fix this example with the proper memory barriers for Non-Atomic Store Atomicity systems, but we need to detour first.
+//
+// We need to take a deeper dive into memory barriers to understand how to restore Multi-Copy Store Atomicity on a Non-Atomic Store Atomicity system.
+// Let's start with a motivating example, using the POWER architecture throughout because it encompasses all the possible observable behaviour.
+// ARMv7 technically allows Non-Atomic Store Atomicity behaviour, but no consumer ARMv7 chip actually exhibits it.
+// ARMv8 reworked its model to specifically say it is a Multi-Copy Store Atomicity system.
+// POWER is one of the last few popular architectures where Non-Atomic Store Atomicity behaviour is guaranteed to be observable, thus we will be using it for the following examples.
+//
+// To preface, POWER has two types of memory barriers called lwsync and sync. The following table lists the guarantees provided by TSO, i.e. x86, and by the lwsync instruction.
+// The table gives a hint as to why our previous definition of LDLD/LDST/STST/STLD isn't granular enough to categorize memory barrier instructions.
+//
+// TSO:                 | POWER lwsync memory barrier:
+// LDLD : YES           | LDLD : YES
+// LDST : YES           | LDST : YES
+// STST : YES           | STST : YES
+// STLD : NO            | STLD : NO
+// A cumulative : YES   | A cumulative : YES
+// B cumulative : YES   | B cumulative : YES
+// IRIW : YES           | IRIW : NO
+//
+// Looking only at the 4 permutations of reorderings, we would be inclined to assume that the TSO memory model provided by x86 is exactly the same as POWER
+// with lwsync memory barrier instructions added in between each pair of memory instructions. That is not the case, because memory barrier causality and cumulativity
+// differ in subtle ways; here they differ by the implicit guarantees from the TSO memory model versus those provided by the POWER lwsync memory barrier.
+// The lwsync memory barrier prevents reordering with instructions that have causality, but does not prevent reordering with instructions that are completely independent.
+// Let's dive into these concepts a bit more.
+//
+// Non-Atomic Store Atomicity architectures are prone to behaviours such as the non-causal outcome of the WRC test above. Architectures such as POWER define memory barriers that enforce
+// ordering with respect to memory accesses in remote cpus other than the cpu actually issuing the memory barrier. This is known as memory barrier cumulativity:
+// how does the memory barrier issued on my cpu affect the view of memory accesses done by remote cpus?
+//
+// Cumulative memory barriers are defined as follows - take your time, this part is very non-trivial:
+// A-Cumulative: We denote group A as the set of memory instructions in this cpu or other cpus that are ordered before the memory barrier in this cpu.
+//               A-Cumulativity requires that memory instructions from any cpu that have performed prior to a memory load before the memory barrier on this cpu are also members of group A.
+// B-Cumulative: We denote group B as the set of memory instructions in this cpu or other cpus that are ordered after the memory barrier in this cpu.
+//               B-Cumulativity requires that memory instructions from any cpu that perform after a load, and including the load in that cpu that returns the value of a store in group B, are
+//               also members of group B.
+// IRIW        : Enforces a global ordering even for memory instructions that have no causality. The memory instructions are completely independent.
+//
+// ---------------------------------------------------------------------------------------------------------
+// WRC Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0           | Thread 1             | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// {i} : STORE(X, 1)  | {ii}  : r0 = LOAD(X) | {v}  : r1 = LOAD(Y)
+//                    | {iii} : lwsync       |
+//                    | {iv}  : STORE(Y, r0) | {vi} : r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Outcome: r0 = 1 && r1 = 1 && r2 = 1
+//
+// Group A of {iii} : {i} && {ii}
+//
+// Group B of {iii} : {iv} && {v} && {vi}
+// ---------------------------------------------------------------------------------------------------------
+//
+// Using the WRC test again and inserting a POWER lwsync - don't concern yourself right now with why the memory barrier was inserted at that spot - we now see the distinction between group A and group B.
+// It demonstrates the A and B Cumulative nature of the lwsync instruction, {iii}. Initially, group A consists of {ii} and group B consists of {iv}, from the local cpu that issued the lwsync.
+// Since {ii} reads from {i}, and {i} happens before {ii}, {i} is included in group A by the definition of A-Cumulativity.
+// Similarly, since {v} reads from {iv}, and {iv} happens before {v}, {v} is included in group B by the definition of B-Cumulativity.
+// {vi} is also included in group B since it happens after {v}, again by the definition of B-Cumulativity.
+//
+// The WRC litmus test represents a scenario where only an A-Cumulative memory barrier is needed. The lwsync not only provides the needed local LDST memory barrier for the local thread, but also ensures
+// that any write Thread 1 has read from before the memory barrier is kept in order with any write Thread 1 does after the memory barrier, as far as any other thread observes.
+// In other words, it ensures that any write that has propagated to Thread 1 before the memory barrier is propagated to every other thread before the second store after the memory barrier in Thread 1
+// can propagate to other threads in the system. This is exactly the definition of A-Cumulativity and what we need to ensure that causality is maintained in the WRC Litmus Test example.
+// With that lwsync in place it is now impossible to observe r0 = 1 && r1 = 1 && r2 = 0. The lwsync has restored causal ordering. Let's look at an example that requires B-Cumulativity.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Initial State:
+// X = 0; Y = 0; Z = 0
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0     | Thread 1     | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1)  | r0 = LOAD(Y) | r1 = LOAD(Z)
+// STORE(Y, 1)  | STORE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0
+// ---------------------------------------------------------------------------------------------------------
+//
+// This example is very similar to WRC except that we have extended the Message Passing idiom through an additional shared variable.
+// Think of this as Thread 0 writing some data into X and setting flag Y, Thread 1 waiting for flag Y and then writing flag Z, and finally Thread 2 waiting for flag Z before reading the data.
+// Take a minute to digest the above example and think about where a memory barrier, lwsync, should be placed. Don't peek at the solution below.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0     | Thread 1     | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1)  | r0 = LOAD(Y) | r1 = LOAD(Z)
+// lwsync       |              |
+// STORE(Y, 1)  | STORE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// First, the lwsync provides the needed local STST memory barrier for the local thread; thus the lwsync here ensures that the store to X propagates to Thread 1 before the store to Y.
+// B-Cumulativity, applied to all operations after the memory barrier, ensures that the store to X is
+// kept in order with respect to the store to Z as far as all other threads participating in the dependency chain are concerned. This is the exact definition of B-Cumulativity.
+// With this one lwsync the outcome outlined above is impossible to observe. If r0 = 1 && r1 = 1, then r2 must be properly observed to be 1.
+//
+// We know that lwsync only provides A-Cumulativity and B-Cumulativity. Now we will look at examples that have no causality constraints, and thus we need to grab heavier memory barriers
+// that, in short, make a store visible to all processors, even those not on the dependency chains. Let's get to the first example.
+// +// --------------------------------------------------------------------------------------------------------- +// Independent Reads of Independent Writes, IRIW, coined by Doug Lea +// --------------------------------------------------------------------------------------------------------- +// Initial State: +// X = 0; Y = 0; +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 | Thread 3 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y) +// | r1 = LOAD(Y) | | r3 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// Observed: r0 = 1 && r1 = 0 && r2 = 1 && r3 = 0 +// --------------------------------------------------------------------------------------------------------- +// +// The IRIW example above clearly shows that writes can be propagated to different cpus in completely different orders. +// Thread 1 sees the store to X but not the store to Y while Thread 3 sees the store to Y but not the store to X, the complete opposite. +// Also to the keen eye you may have noticed this example is a slight modification of the Store Buffer example so try to guess where the memory barriers would go. +// +// --------------------------------------------------------------------------------------------------------- +// Independent Reads of Independent Writes, IRIW, coined by Doug Lea +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 | Thread 3 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y) +// | sync | | sync +// | r1 = LOAD(Y) | | r3 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// +// To ensure that the above observation is forbidden we need to add a full sync memory barrier on both the reading threads. Think of sync as restoring sequential consistency. +// The sync memory barrier ensures that any writes that Thread 1 has read from before the memory barrier are fully propagated to all threads before the reads are satisfied after the memory barrier. +// The same can be said for Thread 3. This is why the sync memory barrier is needed because there is no partial causal ordering here or anything that can be considered for our A and B Cumulativity definitions. +// We must ensure that all writes have been propagated to all cpus before proceeding. This gives way to the difference between sync and lwsync with regards to visibility of writes and cumulativity. +// sync guarantees that all program-order previous stores must have been propagated to all other cpus before the memory instructions after the memory barrier. +// lwsync does not ensure that stores before the memory barrier have actually propagated to any other cpu before memory instructions after the memory barrier, but it will keep stores before and after the +// lwsync in order as far as other cpus are concerned that are within the dependency chain. +// +// Fun fact while ARMv7 claims to be Non-Atomic Store Atomicity no mainstream ARM implementation that I have seen has shown cases of Non-Atomic Store Atomicity. 
+// It's allowed by the ARMv7 memory model and thus you have to program to that. ARMv8 changes this and states that it has Multi-Copy Store Atomicity. +// +// ******** Release-Acquire Semantics ******** +// +// The most useful and common cases where Release-Acquire Semantics are used in every day code is in message passing and mutexes. Let's get onto some examples and the C++ definition of Release-Acquire. +// +// ACQUIRE: +// An Acquire operation is a one-way memory barrier whereby all loads and stores after the acquire operation cannot move up and above the acquire operation. +// Loads and stores before the acquire operation can move down past the acquire operation. An acquire operation should always be paired with a Release operation on the SAME atomic object. +// +// RELEASE: +// A Release operation is a one-way memory barrier whereby all loads and stores before the release operation cannot move down and below the release operation. +// Loads and stores after the release operation can move up and above the release operation. A release operation should always be paired with an Acquire operation on the SAME atomic object. +// +// Release-Acquire pair does not create a full memory barrier but it guarantees that all memory instructions before a Release operation on an atomic object M are visible after an Acquire +// operation on that same atomic object M. Thus these semantics usually are enough to preclude the need for any other memory barriers. +// The synchronization is established only between the threads Releasing and Acquiring the same atomic object M. +// +// --------------------------------------------------- +// Critical Section +// --------------------------------------------------- +// Thread 0 | Thread 1 +// --------------------------------------------------- +// mtx.lock() - Acquire | mtx.lock() - Acquire +// STORE(X, 1) | r0 = LOAD(X) +// mtx.unlock() - Release | mtx.unlock() - Release +// --------------------------------------------------- +// +// A mutex only requires Release-Acquire semantics to protect the critical section. We do not care if operations above the lock leak into the critical section or that operations below the unlock leak into the +// critical section because they are outside the protected region of the lock()/unlock() pair. Release-Acquire semantics does guarantee that everything inside the critical section cannot leak out. +// Thus all accesses of all previous critical sections for the mutex are guaranteed to have completed and be visible when the mutex is handed off to the next party due to the Release-Acquire chaining. +// This also means that mutexes do not provide or restore Multi-Copy Store Atomicity to any memory instructions outside the mutex, like the IRIW example since it does not emit full memory barriers. +// +// ------------------------------------------------------ +// Message Passing +// ------------------------------------------------------ +// Thread 0 | Thread 1 +// ------------------------------------------------------ +// STORE(DATA, 1) | while (!LOAD_ACQUIRE(FLAG)) +// | +// STORE_RELEASE(FLAG, 1) | r0 = LOAD(DATA) +// ------------------------------------------------------ +// +// This is a common message passing idiom that also shows the use of Release-Acquire semantics. It should be obvious by the definitions outlined above why this works. +// An Acquire operation attached to a load needs to provide a LDLD and LDST memory barrier according to our definition of acquire. 
This is provided by default on x86 TSO, thus no memory barrier is emitted.
+// A Release operation attached to a store needs to provide a STST and LDST memory barrier according to our definition of release. This is also provided by default on x86 TSO, thus no memory barrier is emitted.
+//
+// A couple of things of note here. One is that by attaching the semantics of a memory model directly to the memory instruction/operation itself, we can take advantage of the fact that some processors
+// already provide guarantees between memory instructions, and thus we do not have to emit memory barriers. Another thing of note is that the memory order is directly attached to the operation,
+// so you must do the Release-Acquire pairing on the SAME object, which in this case is the FLAG variable. An Acquire or Release on one object provides no guarantee of synchronization with a Release or Acquire on a different object.
+// This better encapsulates the meaning of the code and also allows the processor to potentially do more optimizations, since a standalone memory barrier will order all memory instructions of a given type before and after the barrier,
+// whereas the memory ordering attached to the load or store tells the processor that it only has to order memory instructions in relation to that specific load or store with the given memory order.
+//
+//
+// ---------------------------------------------------------------------------------------------------------
+// Release Attached to a Store VS. Standalone Fence
+// ---------------------------------------------------------------------------------------------------------
+// STORE(DATA, 1)           | STORE(DATA, 1)
+//                          | ATOMIC_THREAD_FENCE_RELEASE()
+// STORE_RELEASE(FLAG, 1)   | STORE_RELAXED(FLAG, 1)
+// STORE_RELAXED(VAR, 2)    | STORE_RELAXED(VAR, 2)
+// ---------------------------------------------------------------------------------------------------------
+// ARMv8 Assembly
+// ---------------------------------------------------------------------------------------------------------
+// str 1, DATA              | str 1, DATA
+//                          | dmb ish
+// stlr 1, FLAG             | str 1, FLAG
+// str 2, VAR               | str 2, VAR
+// ---------------------------------------------------------------------------------------------------------
+//
+// In the above example the release is attached to the FLAG variable, thus synchronization only needs to be guaranteed for that atomic variable.
+// It is entirely possible for the VAR relaxed store to be reordered above the release store.
+// In the fence version, since the fence is standalone, there is no notion of which object the release is meant to be attached to, thus the fence must prevent all subsequent relaxed stores
+// from being reordered above the fence. The fence provides a stronger guarantee whereby the VAR relaxed store can now no longer be moved up and above the release operation.
+// Also notice the ARMv8 assembly is different: the release fence must use the stronger dmb ish barrier instead of the dedicated release store instruction, stlr.
+// We dive more into the fences provided by eastl::atomic below.
+//
+// Release-Acquire semantics also have the property that they must chain through multiple dependencies, which is where our knowledge from the previous section comes into play.
+// Everything on the Release-Acquire dependency chain must be visible to the next hop in the chain.
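+//
+// Before looking at the chained examples, here is a minimal sketch of the basic Message Passing idiom from above written
+// against eastl::atomic; gFlag and gData are illustrative names.
+//
+// eastl::atomic<int> gFlag{0};
+// int gData = 0; // plain data guarded by the flag
+//
+// void producer()
+// {
+//     gData = 42;                           // plain store
+//     gFlag.store(1, memory_order_release); // release: everything above is visible to whoever acquires gFlag
+// }
+//
+// void consumer()
+// {
+//     while (gFlag.load(memory_order_acquire) == 0) {} // acquire: pairs with the release store on gFlag
+//     int r0 = gData;                                  // guaranteed to observe 42
+// }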
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0              | Thread 1              | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1)           | r0 = LOAD_ACQUIRE(Y)  | r1 = LOAD_ACQUIRE(Z)
+// STORE_RELEASE(Y, 1)   | STORE_RELEASE(Z, r0)  | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// ---------------------------------------------------------------------------------------------------------
+// Write-To-Read Causality, WRC, Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0     | Thread 1              | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1)  | r0 = LOAD(X)          | r1 = LOAD_ACQUIRE(Y)
+//              | STORE_RELEASE(Y, r0)  | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// You may recognize both of these examples from the previous sections. We replaced the standalone POWER memory barrier instructions with Release-Acquire semantics attached directly to the operations where we want causality preserved;
+// we have transformed those examples to use the eastl::atomic memory model.
+// Take a moment to digest these examples in relation to the definition of Release-Acquire semantics.
+//
+// The Acquire chain can be satisfied by reading the value from the release store, or from any later store headed by that release operation. The following example will make this clearer.
+//
+// ------------------------------------------------------
+// Release Sequence Headed
+// ------------------------------------------------------
+// Initial State:
+// DATA = 0; FLAG = 0;
+// ------------------------------------------------------
+// Thread 0                 | Thread 1
+// ------------------------------------------------------
+// STORE(DATA, 1)           | r0 = LOAD_ACQUIRE(FLAG)
+//                          |
+// STORE_RELEASE(FLAG, 1)   | r1 = LOAD(DATA)
+// STORE_RELAXED(FLAG, 3)   |
+// ------------------------------------------------------
+// Observed: r0 = 3 && r1 = 0
+// ------------------------------------------------------
+//
+// In the above example we may read the value 3 from FLAG, which was not stored by the release store but was headed by that release store. Since we observed a store from the release sequence,
+// the acquire load still synchronizes with the release store; therefore we are guaranteed to observe r1 = 1, and the outcome of r0 = 3 && r1 = 0 shown above is forbidden.
+// The stores to FLAG from the STORE_RELEASE up to but not including the next STORE_RELEASE operation make up the release sequence headed by the first release store operation. Any store in that sequence can be used to enforce
+// causality on the load acquire.
+//
+// ******** Consume is currently not useful ********
+//
+// Consume is a weaker form of an acquire barrier and creates the Release-Consume barrier pairing.
+// Consume states that a load operation on an atomic object M cannot allow any loads or stores dependent on the value loaded by the operation to be reordered before the operation.
+// To understand consume we must first understand dependent loads.
+// You might encounter this being called a data dependency or an address dependency in some literature.
+// +// -------------------------------------------------------------- +// Address Dependency +// -------------------------------------------------------------- +// Initial State: +// DATA = 0; PTR = nullptr; +// -------------------------------------------------------------- +// Thread 0 | Thread 1 +// -------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD(PTR) - typeof(r0) = int* +// | +// STORE(PTR, &DATA) | r1 = LOAD(r0) - typeof(r1) = int +// -------------------------------------------------------------- +// +// There is a clear dependency here where we cannot load from *int until we actually read the int* from memory. +// Now it is possible for Thread 1's load from *ptr to be observed before the store to DATA, therefore it can lead to r0 = &DATA && r1 = 0. +// While this is a failure of causality, it is allowed by some cpus such as the DEC Alpha and I believe Blackfin as well. +// Thus a data dependency memory barrier must be inserted between the data dependent loads in Thread 1. Note that this would equate to a nop on any processor other than the DEC Alpha. +// +// This can occur for a variety of hardware reasons. We learned about invalidation queues. It is possible that the invalidation for DATA gets buffered in Thread 1. DEC Alpha allows the Thread 1 +// load from PTR to continue without marking the entries in its invalidation queue. Thus the subsequent load is allowed to return the old cached value of DATA instead of waiting for the +// marked entries in the invalidation queue to be processed. It is a design decision of the processor not to do proper dependency tracking here and instead relying on the programmer to insert memory barriers. +// +// This data dependent ordering guarantee is useful because in places where we were using an Acquire memory barrier we can reduce it to this Consume memory barrier without any hardware barriers actually emitted on every modern processor. +// Let's take the above example, translate it to Acquire and Consume memory barriers and then translate it to the ARMv7 assembly and see the difference. +// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency - Release-Acquire Address Dependency - Release-Acquire - ARMv7 Assembly +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD_ACQUIRE(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | dmb ish | dmb ish +// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// To get Release-Acquire semantics on ARMv7 we need to emit dmb ish; memory barriers. 
+// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency - Release-Consume Address Dependency - Release-Consume - ARMv7 Assembly +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD_CONSUME(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | dmb ish | +// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// Data Dependencies can not only be created by read-after-write/RAW on registers, but also by RAW on memory locations too. Let's look at some more elaborate examples. +// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency on Registers - Release-Consume - ARMv7 Address Dependency on Memory - Release-Consume - ARMv7 +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | r1 = r0 + 0 | STORE(TEMP, r0) +// dmb ish | r2 = r1 - 0 dmb ish | r1 = LOAD(TEMP) +// STORE(PTR, &DATA) | r3 = LOAD(r2) STORE(PTR, &DATA) | r2 = LOAD(r1) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// The above shows a more elaborate example of how data dependent dependencies flow through RAW chains either through memory or through registers. +// +// Notice by identifying that this is a data dependent operation and asking for a consume ordering, we can completely eliminate the memory barrier on Thread 1 since we know ARMv7 does not reorder data dependent loads. Neat. +// Unfortunately every major compiler upgrades a consume to an acquire ordering, because the consume ordering in the standard has a stronger guarantee and requires the compiler to do complicated dependency tracking. +// Dependency chains in source code must be mapped to dependency chains at the machine instruction level until a std::kill_dependency in the source code. +// +// ---------------------------------------------------------------- +// Non-Address Dependency && Multiple Chains +// ---------------------------------------------------------------- +// Initial State: +// std::atomic FLAG; int DATA[1] = 0; +// ---------------------------------------------------------------- +// Thread 0 | Thread 1 +// ---------------------------------------------------------------- +// STORE(DATA[0], 1) | int f = LOAD_CONSUME(FLAG) +// | int x = f +// | if (x) return Func(x); +// | +// STORE_RELEASE(FLAG, 1) | Func(int y) return DATA[y - y] +// ---------------------------------------------------------------- +// +// This example is really concise but there is a lot going on. Let's digest it. 
+//
+// First, the standard allows consume ordering even on what we will call non-true machine level dependencies, unlike the pointer load followed by a load from that pointer shown in the previous examples.
+// Here the dependency is between two ints, and the dependency chain on Thread 1 is as follows: f -> x -> y -> DATA[y - y]. The standard requires that source code dependencies on the value loaded
+// by the consume flow through assignments and even through function calls. Also notice we added a dependency of the dereference of DATA on the value loaded by the consume; while it computes nothing, it abides by the standard
+// by enforcing a source code data dependent load on the consume operation. You may see this referred to as an artificial data dependency in other texts.
+// If we assume the compiler is able to track all these dependencies, the question is how we enforce these dependencies at the machine instruction level. Let's go back to our pointer dependent load example.
+//
+// ----------------------------------------------------------------
+// addi r0, pc, offset;
+// ldr r1, 0(r0);
+// ldr r2, 0(r1);
+// ----------------------------------------------------------------
+//
+// The above pseudo assembly does a pc-relative calculation to find the address of ptr. We then load ptr and continue the dependency chain by loading the int from the loaded ptr.
+// Thus r0 has the type int**, which we use to load r1, an int*, which we use to load our final value r2, the int.
+// The key observation here is that most load instructions provided by most architectures only allow moving from a base register + offset into a destination register.
+// This allows for trivial capturing of data dependent loads through pointers. But how do we capture the data dependency of DATA[y - y]? We would need something like this.
+//
+// ----------------------------------------------------------------
+// sub r1, r0, r0; // Assume r0 holds y from the Consume Operation
+// add r3, r1, r2; // Assume r2 holds the address of DATA[0]
+// ldr r4, 0(r3);
+// ----------------------------------------------------------------
+//
+// We cannot use two registers as both arguments to the load instruction. Thus, as you noticed, to accomplish this we had to carry indirect data dependencies through registers to compute the final address from the consume
+// load of y, and then load from the final computed address. The compiler would have to recognize all these dependencies and enforce that they be maintained in the generated assembly.
+// The compiler must ensure the entire syntactic, source code, data-dependency chain is enforced in the generated assembly, no matter how long such a chain may be.
+// Because of this and other issues, every major compiler unilaterally promotes consume to an acquire operation across the board. Read reference [15] for more information.
+// This completely removes the actual usefulness of consume for the pointer dependent case, which is used quite heavily in concurrent read-heavy data structures where updates are published via pointer swaps.
+//
+// ******** read_depends use case - Release-ReadDepends Semantics ********
+//
+// eastl::atomic provides a weaker read_depends operation that encapsulates only the pointer dependency case above: loading a pointer and then loading values through the loaded pointer.
+// The read_depends operation can be used only on loads from an eastl::atomic<T*> type. The returned pointer of the load must and can only be used to then further load values. And that is it.
+// If you are unsure, upgrade this load to an acquire operation.
+//
+// MyStruct* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = ptr->a;
+// int b = ptr->b;
+// return a + b;
+//
+// The loads from ptr after the gAtomicPtr load ensure that the correct values of a and b are observed. This pairs with a Release operation on the writer side by releasing gAtomicPtr.
+//
+//
+// As said above, the returned pointer from a .load(memory_order_read_depends) can only be used to then further load values.
+// Dereferencing (*) and arrow dereferencing (->) are valid operations on return values from .load(memory_order_read_depends).
+//
+// MyStruct* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = ptr->a;  - VALID
+// int a = *ptr;    - VALID
+//
+// Since dereferencing is just indexing via some offset from some base address, this also means addition and subtraction of constants is ok.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = *(ptr + 1)  - VALID
+// int a = *(ptr - 1)  - VALID
+//
+// Casts also work correctly since casting is just offsetting a pointer depending on the inheritance hierarchy or if using intrusive containers.
+//
+// ReadDependsIntrusive** intrusivePtr = gAtomicPtr.load(memory_order_read_depends);
+// ReadDependsIntrusive* ptr = ((ReadDependsIntrusive*)(((char*)intrusivePtr) - offsetof(ReadDependsIntrusive, next)));
+//
+// Base* basePtr = gAtomicPtr.load(memory_order_read_depends);
+// Derived* derivedPtr = static_cast<Derived*>(basePtr);
+//
+// Both of the above casts of the result of the load are valid for this memory order.
+//
+// You can reinterpret_cast the returned pointer value to a uintptr_t to set bits, clear bits, or xor bits, but the pointer must be cast back before doing anything else.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// ptr = reinterpret_cast<int*>(reinterpret_cast<uintptr_t>(ptr) & ~3);
+//
+// Do not use the results of any equality or relational operators (==, !=, >, <, >=, <=) in the computation of offsets before dereferencing.
+// As we learned above in the Control Dependencies section, CPUs will not order Load-Load Control Dependencies. Relational and equality operators are often compiled using branches.
+// It doesn't have to be compiled to branches; conditional instructions could be used, and some architectures provide comparison instructions, such as set-less-than, which do not need
+// branches when the result of the relational operator is used in arithmetic statements. Then again, short circuiting may need to introduce branches since C++ guarantees the
+// rest of the expression must not be evaluated.
+// The following odd code is forbidden.
+//
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int* ptr2 = ptr + (ptr >= 0);
+// int a = *ptr2;
+//
+// Only equality comparisons against nullptr are allowed. This is because the compiler cannot assume that the address of the loaded value is some known address and substitute our loaded value.
+// int* ptr = gAtomicPtr.load(memory_order_read_depends);
+// if (ptr == nullptr);  - VALID
+// if (ptr != nullptr);  - VALID
+//
+// Thus the above sentence that states:
+// The returned pointer of the load must and can only be used to then further load values. And that is it.
+// must be respected by the programmer. This memory order is an optimization added for efficient, read-heavy, pointer-swapping data structures. If you are unsure, use memory_order_acquire.
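+//
+// For completeness, here is a sketch of both sides of the publication pattern described above. The names
+// gAtomicPtr, MyStruct and MakeStruct are illustrative only and not part of the library.
+//
+// // Writer thread: fully construct the object, then publish the pointer with a release store.
+// MyStruct* newObject = MakeStruct();
+// gAtomicPtr.store(newObject, memory_order_release);
+//
+// // Reader thread: read_depends load; the returned pointer may only be used to load further values.
+// MyStruct* ptr = gAtomicPtr.load(memory_order_read_depends);
+// int a = ptr->a;
+//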
+//
+// ******** Relaxed && eastl::atomic guarantees ********
+//
+// We saw various ways that compiler barriers alone do not help us, and that we need something more granular to make sure accesses are not mangled by the compiler if they are to be considered atomic.
+// Ensuring these guarantees, like preventing dead-store elimination or the splitting of stores into smaller sub-stores, is where the C/C++11
+// standard comes into play to define what it means to operate on an atomic object.
+// These basic guarantees are provided via new compiler intrinsics on gcc/clang that give explicit indication to the compiler,
+// or on msvc by casting the underlying atomic T to a volatile T*, which provides stronger compiler guarantees than the standard requires.
+// Essentially volatile turns off all possible optimizations on that variable access and ensures all volatile variables cannot be
+// reordered across sequence points. Again we are not using volatile here to guarantee atomicity; we are using it for its very intended purpose:
+// to tell the compiler it cannot assume anything about the contents of that variable. Now let's dive into the base guarantees of eastl::atomic.
+//
+// The standard defines the following for all operations on an atomic object M.
+//
+// Write-Write Coherence:
+// If an operation A that modifies an atomic object M (a store) happens before an operation B that modifies M (a store), then A shall be earlier than B in the modification order of M.
+//
+// Read-Read Coherence:
+// If a value computation A on an atomic object M (a load) happens before a value computation B on M (a load), and A takes its value from a side effect X on M (from a previous store to M), then the value
+// computed by B shall either be the value stored by X or the value stored by some later side effect Y on M, where Y follows X in the modification order of M.
+//
+// Read-Write Coherence:
+// If a value computation A on an atomic object M (a load) happens before an operation B that modifies M (a store), then A shall take its value from a side effect X on M, where X precedes B in the modification
+// order of M.
+//
+// Write-Read Coherence:
+// If a side effect X on an atomic object M (a store) happens before a value computation B on M (a load), then the evaluation of B must take its value from X or from some side effect Y that follows X in the
+// modification order of M.
+//
+// What does all this mean? This is just a pedantic way of saying that the preceding coherence requirements disallow compiler reordering of atomic operations on a single atomic object.
+// This means all operations must be emitted by the compiler. Stores cannot be dead-store eliminated even if they are the only stores.
+// Loads cannot have common subexpression elimination performed on them even if they are the only loads.
+// Loads and stores to the same atomic object cannot be reordered by the compiler.
+// The compiler cannot introduce extra loads or stores to the atomic object.
+// The compiler also cannot reload from an atomic object; it must save the loaded value, for example to a stack slot, and reuse it.
+// Essentially this provides all the necessary guarantees needed when treating an object as atomic from the compiler's point of view.
+//
+// ******** Same Address LoadLoad Reordering ********
+//
+// It is expected that same address operations cannot be and are not reordered with each other. It is expected that operations to the same address have sequential consistency because
+// they are to the same address.
+// If you picture a cpu executing instructions, how is it possible to reorder instructions to the same address and yet keep program behaviour the same?
+// Same Address LoadLoad Reordering is one weakening that can be done while keeping observed program behaviour the same for a single-threaded program.
+// More formally, A and B are two memory instructions onto the same address P, where A is program ordered before B. If A and B are both loads, then they need not be ordered.
+// If B is a store, then it cannot retire before instruction A completes. If A is a store and B is a load, then B must get its value forwarded from the store buffer or observe a later store
+// from the cache. Thus Same Address LDST, STST, STLD cannot be reordered but Same Address LDLD can be reordered.
+// Intel Itanium and SPARC RMO cpus allow and do Same Address LoadLoad Reordering.
+// Let's look at an example.
+//
+// ---------------------------
+// Same Address LoadLoad
+// ---------------------------
+// Initial State:
+// x = 0;
+// ---------------------------
+// Thread 0     | Thread 1
+// ---------------------------
+// STORE(x, 1)  | r0 = LOAD(x)
+//              | r1 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 1 && r1 = 0
+// ---------------------------
+//
+// Notice in the above example it appears as if the two loads from the same address have been reordered. If we first observed the new store of 1, then the next load should not observe a value from the past.
+// Many programmers expect same address sequential consistency: all accesses to a single address appear to execute in a sequential order.
+// Notice this violates the Read-Read Coherence for all atomic objects defined by the standard and thus provided by eastl::atomic.
+//
+// All operations on eastl::atomic, regardless of the memory ordering of the operation, provide Same Address Sequential Consistency since they must abide by the coherence rules above.
+//
+// ******** eastl::atomic_thread_fence ********
+//
+// eastl::atomic_thread_fence(relaxed) : Provides no ordering guarantees
+// eastl::atomic_thread_fence(acquire) : Prevents all prior loads from being reordered with all later loads and stores, LDLD && LDST memory barrier
+// eastl::atomic_thread_fence(release) : Prevents all prior loads and stores from being reordered with all later stores, STST && LDST memory barrier
+// eastl::atomic_thread_fence(acq_rel) : Union of acquire and release, LDLD && STST && LDST memory barrier
+// eastl::atomic_thread_fence(seq_cst) : Full memory barrier that provides a single total order
+//
+// See Reference [9] and Fence-Fence, Atomic-Fence, Fence-Atomic Synchronization, Atomics Order and Consistency in the C++ std.
+//
+// ******** Atomic && Fence Synchronization ********
+//
+// ---------------------------
+// Fence-Fence Synchronization
+// ---------------------------
+// A release fence A synchronizes-with an acquire fence B if there exist operations X and Y on the same atomic object M, such that fence A is sequenced-before operation X and X modifies M,
+// operation Y is sequenced-before B and Y reads the value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after fence B.
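+//
+// A minimal sketch of Fence-Fence Synchronization in code, using the fence API described above; gData and
+// gFlag are illustrative names.
+//
+// int gData = 0;
+// eastl::atomic<int> gFlag = 0;
+//
+// // Thread 0
+// gData = 42;                                     // non-atomic store, sequenced before fence A
+// atomic_thread_fence(memory_order_release);      // fence A
+// gFlag.store(1, memory_order_relaxed);           // operation X, modifies gFlag
+//
+// // Thread 1
+// while (gFlag.load(memory_order_relaxed) == 0)   // operation Y, reads the value written by X
+//     ;
+// atomic_thread_fence(memory_order_acquire);      // fence B; fence A synchronizes-with fence B
+// int r = gData;                                  // guaranteed to observe 42
+//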
+//
+// ----------------------------
+// Atomic-Fence Synchronization
+// ----------------------------
+// An atomic release operation A on atomic object M synchronizes-with an acquire fence B if there exists some atomic operation X on atomic object M, such that X is sequenced-before B and reads
+// the value written by A.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before atomic release operation A will happen-before all non-atomic and relaxed atomic loads after fence B.
+//
+// ----------------------------
+// Fence-Atomic Synchronization
+// ----------------------------
+// A release fence A synchronizes-with an atomic acquire operation B on an atomic object M if there exists an atomic operation X such that A is sequenced-before X, X modifies M and B reads the
+// value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after atomic acquire operation B.
+//
+// This can be used to add synchronization to a series of several relaxed atomic operations, as in the following trivial example.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// x = 0;
+// eastl::atomic<int> y = 0;
+// z = 0;
+// eastl::atomic<int> w = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0                                    | Thread 1
+// ----------------------------------------------------------------------------------------
+// x = 2                                       | r0 = y.load(memory_order_relaxed);
+// z = 2                                       | r1 = w.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_release);  | atomic_thread_fence(memory_order_acquire);
+// y.store(1, memory_order_relaxed);           | r2 = x
+// w.store(1, memory_order_relaxed);           | r3 = z
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0 && r3 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ******** Atomic vs Standalone Fence ********
+//
+// A sequentially consistent fence is stronger than a sequentially consistent operation because it is not tied to a specific atomic object.
+// An atomic fence must provide synchronization with ANY atomic object, whereas the ordering on the atomic object itself must only provide
+// that ordering on that SAME atomic object. Thus this can provide cheaper guarantees on architectures with dependency tracking hardware.
+// Let's look at a concrete example that will make this all clear.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0                                    | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed);           | r0 = y.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_seq_cst);  | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed);           | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two sequentially consistent fences synchronize-with each other, thus ensuring that if we observe r0 = 1 then we also observe r1 = 2.
+// In the above example, if we observe r0 = 1 it is impossible to observe r1 = 0.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0                                    | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed);           | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst);       | x.fetch_add(1, memory_order_seq_cst);
+// y.store(1, memory_order_relaxed);           | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two sequentially consistent fetch_add operations on x synchronize-with each other, ensuring that if we observe r0 = 1 then we cannot observe r1 = 0.
+// The thing to note here is that we synchronized on the SAME atomic object, that being the atomic object x.
+// Note that replacing the x.fetch_add() in Thread 1 with a sequentially consistent operation on another atomic object or a sequentially consistent fence can lead to
+// observing r1 = 0 even if we observe r0 = 1. For example, the following code may fail.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0                                    | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed);           | r0 = y.load(memory_order_relaxed);
+//                                             | x.fetch_add(1, memory_order_seq_cst);
+// y.fetch_add(1, memory_order_seq_cst);       | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0                                    | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed);           | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst);       | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed);           | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// In this example it is entirely possible that we observe r0 = 1 && r1 = 0, even though we have source code causality and sequentially consistent operations.
+// Observability is tied to the atomic object on which the operation was performed, and the thread fence doesn't synchronize-with the fetch_add because
+// there is no load above the fence that reads the value from the fetch_add.
+//
+// ******** Sequential Consistency Semantics ********
+//
+// See section, Order and consistency, in the C++ std and Reference [9].
+//
+// A load with memory_order_seq_cst performs an acquire operation
+// A store with memory_order_seq_cst performs a release operation
+// An RMW with memory_order_seq_cst performs both an acquire and a release operation
+//
+// All memory_order_seq_cst operations exhibit the below single total order in which all threads observe all modifications in the same order.
+//
+// Paraphrasing, there is a single total order on all memory_order_seq_cst operations, S, such that each sequentially consistent operation B that loads a value from
+// atomic object M observes either the result of the last sequentially consistent modification A on M that precedes B in S, or some modification on M that isn't memory_order_seq_cst.
+// For atomic modifications A and B on an atomic object M, B occurs after A in the modification order of M if:
+// there is a memory_order_seq_cst fence X such that A is sequenced before X, and X precedes B in S,
+// there is a memory_order_seq_cst fence Y such that Y is sequenced before B, and A precedes Y in S,
+// there are memory_order_seq_cst fences X and Y such that A is sequenced before X, Y is sequenced before B, and X precedes Y in S.
+//
+// Let's look at some examples using memory_order_seq_cst.
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0                      | Thread 1
+// ------------------------------------------------------------
+// STORE_RELAXED(x, 1)           | STORE_RELAXED(y, 1)
+// ATOMIC_THREAD_FENCE(SEQ_CST)  | ATOMIC_THREAD_FENCE(SEQ_CST)
+// r0 = LOAD_RELAXED(y)          | r1 = LOAD_RELAXED(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0                      | Thread 1
+// ------------------------------------------------------------
+// STORE_SEQ_CST(x, 1)           | STORE_SEQ_CST(y, 1)
+// r0 = LOAD_SEQ_CST(y)          | r1 = LOAD_SEQ_CST(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// Both solutions above correctly ensure that the end result cannot be both r0 and r1 returning 0. Notice that the second one requires memory_order_seq_cst on all
+// operations to ensure they are in the total order, S, for all memory_order_seq_cst operations. The other example uses the stronger guarantee provided by a sequentially consistent fence.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0             | Thread 1                      | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_SEQ_CST(x, 1)  | r0 = LOAD_RELAXED(x)          | STORE_RELAXED(y, 1)
+//                      | ATOMIC_THREAD_FENCE(SEQ_CST)  | ATOMIC_THREAD_FENCE(SEQ_CST)
+//                      | r1 = LOAD_RELAXED(y)          | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// You'll notice this example sits in between the Store-Buffer and IRIW examples we have seen earlier. The store in Thread 0 needs to be sequentially consistent so it synchronizes with the
+// thread fence in Thread 1. In C++20, due to Reference [9], the strength of sequentially consistent fences has been increased to allow for the following.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality - C++20
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0             | Thread 1                      | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_RELAXED(x, 1)  | r0 = LOAD_RELAXED(x)          | STORE_RELAXED(y, 1)
+//                      | ATOMIC_THREAD_FENCE(SEQ_CST)  | ATOMIC_THREAD_FENCE(SEQ_CST)
+//                      | r1 = LOAD_RELAXED(y)          | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// Notice we were able to turn the store in Thread 0 into a relaxed store and still properly observe either r1 or r2 returning 1.
+// Note that all implementations of the C++11 standard on every architecture already allow the C++20 behaviour.
+// The C++20 standard memory model was updated to recognize that all current implementations already implement fences in this stronger manner.
+//
+// ******** False Sharing ********
+//
+// As we know, operations work on the granularity of a cacheline. An RMW operation obviously must have some help from the cache to ensure the entire operation
+// is seen as one whole unit. Conceptually we can think of this as the cpu's cache taking a lock on the cacheline, the cpu doing the read-modify-write operation on the
+// locked cacheline, and then releasing the lock on the cacheline. This means during that time any other cpu needing that cacheline must wait for the lock to be released.
+//
+// If we have two atomic objects doing RMW operations and they are within the same cacheline, they are unintentionally contending and serializing with each other even
+// though they are two completely separate objects. This phenomenon is commonly called false sharing.
+// You can cacheline-align your structure or the eastl::atomic object to prevent false sharing.
+//
+// ******** union of eastl::atomic ********
+//
+// union { eastl::atomic<uint8_t> atomic8; eastl::atomic<uint32_t> atomic32; };
+//
+// Since we know that operations work at the granularity of a processor's cacheline, we may expect that storing to and loading
+// from atomic variables of different widths at the same address would not cause weird observable behaviour, but it may.
+// Store buffers allow smaller stores to replace parts of larger loads that are forwarded from a store buffer.
+// This means if there are 2 bytes of modified data in the store buffer that overlap with a 4 byte load, the 2 bytes will be forwarded
+// from the store buffer. This is even documented behaviour of the x86 store buffer in the x86 architecture manual.
+// This behaviour can cause processors to observe values that have never been, and never will be, visible on the bus to other processors.
+// The use of a union with eastl::atomic is not wrong, but your code must be able to withstand these effects.
+//
+// Assume everything starts out initially as zero.
+//
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0                  | Thread 1                          | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// cmpxchg 0 -> 0x11111111   | cmpxchg 0x11111111 -> 0x22222222  | mov byte 0x33; mov 4 bytes into register;
+// ---------------------------------------------------------------------------------------------------------
+//
+// After all operations complete, the value in memory at that location is 0x22222233.
+// It is possible that the 4 byte load in Thread 2 actually returns 0x11111133.
+// Now 0x11111133 is an observed value that no other cpu could observe because it was never globally visible on the data bus.
+//
+// If the value in memory is 0x22222233, then the first cmpxchg succeeded, then the second cmpxchg succeeded, and finally our
+// byte was stored to memory; yet our load returned 0x11111133. This is because store buffer contents can be forwarded to overlapping loads.
+// It is possible that the byte store was put in the store buffer, and our load happened after the first cmpxchg with the byte forwarded.
+// This behaviour is fine as long as your algorithm is able to cope with these kinds of store buffer forwarding effects.
+//
+// Reference [13] is a great read for more on this topic of mixed-size concurrency.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include <EASTL/internal/atomic/atomic.h>
+#include <EASTL/internal/atomic/atomic_standalone.h>
+#include <EASTL/internal/atomic/atomic_flag.h>
+#include <EASTL/internal/atomic/atomic_flag_standalone.h>
+
+
+#endif /* EASTL_ATOMIC_H */
diff --git a/external/EASTL/include/EASTL/bit.h b/external/EASTL/include/EASTL/bit.h
new file mode 100644
index 00000000..aba48842
--- /dev/null
+++ b/external/EASTL/include/EASTL/bit.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_BIT_H
+#define EASTL_BIT_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/numeric_limits.h>
+#include <string.h> // memcpy
+
+namespace eastl
+{
+	// eastl::bit_cast
+	// Obtains a value of type To by reinterpreting the object representation of 'from'.
+	// Every bit in the value representation of the returned To object is equal to the
+	// corresponding bit in the object representation of 'from'.
+	//
+	// In order for bit_cast to be constexpr, the compiler needs to explicitly support
+	// it by providing the __builtin_bit_cast builtin. If that builtin is not available,
+	// then we memcpy into aligned storage at runtime and return that instead.
+	//
+	// Both types To and From must be equal in size, and must be trivially copyable.
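+	//
+	// For example, a sketch of typical usage (the value shown assumes an IEEE-754 binary32 float):
+	//
+	//     float f = 1.0f;
+	//     uint32_t bits = eastl::bit_cast<uint32_t>(f); // bits == 0x3F800000
+	//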
+
+	#if defined(EASTL_CONSTEXPR_BIT_CAST_SUPPORTED) && EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+	template <typename To, typename From,
+	          typename = eastl::enable_if_t<sizeof(To) == sizeof(From)
+	                                        && eastl::is_trivially_copyable<To>::value
+	                                        && eastl::is_trivially_copyable<From>::value>
+	>
+	EA_CONSTEXPR To bit_cast(const From& from) EA_NOEXCEPT
+	{
+		return __builtin_bit_cast(To, from);
+	}
+
+	#else
+
+	template <typename To, typename From,
+	          typename = eastl::enable_if_t<sizeof(To) == sizeof(From)
+	                                        && eastl::is_trivially_copyable<To>::value
+	                                        && eastl::is_trivially_copyable<From>::value>
+	>
+	inline To bit_cast(const From& from) EA_NOEXCEPT
+	{
+		typename eastl::aligned_storage<sizeof(To), alignof(To)>::type to;
+		::memcpy(eastl::addressof(to), eastl::addressof(from), sizeof(To));
+		return reinterpret_cast<To&>(to);
+	}
+
+	#endif // EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+	#if defined(EA_COMPILER_CPP20_ENABLED)
+	#ifndef EASTL_COUNT_LEADING_ZEROES
+		#if defined(__GNUC__)
+			#if (EA_PLATFORM_PTR_SIZE == 8)
+				#define EASTL_COUNT_LEADING_ZEROES __builtin_clzll
+			#else
+				#define EASTL_COUNT_LEADING_ZEROES __builtin_clz
+			#endif
+		#endif
+
+		#ifndef EASTL_COUNT_LEADING_ZEROES
+			static inline int eastl_count_leading_zeroes(uint64_t x)
+			{
+				if(x)
+				{
+					int n = 0;
+					if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+					if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+					if(x & 0xFFFFFF00) { n += 8;  x >>= 8;  }
+					if(x & 0xFFFFFFF0) { n += 4;  x >>= 4;  }
+					if(x & 0xFFFFFFFC) { n += 2;  x >>= 2;  }
+					if(x & 0xFFFFFFFE) { n += 1; }
+					return 63 - n;
+				}
+				return 64;
+			}
+
+			static inline int eastl_count_leading_zeroes(uint32_t x)
+			{
+				if(x)
+				{
+					int n = 0;
+					if(x <= 0x0000FFFF) { n += 16; x <<= 16; }
+					if(x <= 0x00FFFFFF) { n += 8;  x <<= 8;  }
+					if(x <= 0x0FFFFFFF) { n += 4;  x <<= 4;  }
+					if(x <= 0x3FFFFFFF) { n += 2;  x <<= 2;  }
+					if(x <= 0x7FFFFFFF) { n += 1; }
+					return n;
+				}
+				return 32;
+			}
+
+			#define EASTL_COUNT_LEADING_ZEROES eastl_count_leading_zeroes
+		#endif
+	#endif
+
+	template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+	EA_CONSTEXPR int countl_zero(const T num) EA_NOEXCEPT
+	{
+		EA_CONSTEXPR auto DIGITS = eastl::numeric_limits<T>::digits;
+		EA_CONSTEXPR auto DIGITS_U = eastl::numeric_limits<unsigned>::digits;
+		EA_CONSTEXPR auto DIGITS_ULL = eastl::numeric_limits<unsigned long long>::digits;
+
+		if (num == 0)
+		{
+			return DIGITS;
+		}
+
+		if constexpr (DIGITS <= DIGITS_U)
+		{
+			EA_CONSTEXPR auto DIFF = DIGITS_U - DIGITS;
+			return EASTL_COUNT_LEADING_ZEROES(static_cast<unsigned>(num)) - DIFF;
+		}
+		else
+		{
+			EA_CONSTEXPR auto DIFF = DIGITS_ULL - DIGITS;
+			return EASTL_COUNT_LEADING_ZEROES(static_cast<unsigned long long>(num)) - DIFF;
+		}
+	}
+
+	template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+	EA_CONSTEXPR bool has_single_bit(const T num) EA_NOEXCEPT
+	{
+		return num != 0 && (num & (num - 1)) == 0;
+	}
+
+	template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+	EA_CONSTEXPR T bit_ceil(const T num) EA_NOEXCEPT
+	{
+		if (num <= 1U)
+		{
+			return T(1);
+		}
+
+		const auto shift = eastl::numeric_limits<T>::digits - eastl::countl_zero(static_cast<T>(num - 1));
+		return static_cast<T>(T(1) << shift);
+	}
+
+	template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+	EA_CONSTEXPR T bit_floor(const T num) EA_NOEXCEPT
+	{
+		if (num == 0)
+		{
+			return T(0);
+		}
+
+		const auto shift = eastl::numeric_limits<T>::digits - eastl::countl_zero(num) - 1;
+		return static_cast<T>(T(1) << shift);
+	}
+
+	template <typename T, typename = eastl::enable_if_t<eastl::is_unsigned_v<T>>>
+	EA_CONSTEXPR T bit_width(const T num) EA_NOEXCEPT
+	{
+		return static_cast<T>(eastl::numeric_limits<T>::digits - eastl::countl_zero(num));
+	}
+	#endif
+
+} // namespace eastl
+
+#endif // EASTL_BIT_H
diff --git a/external/EASTL/include/EASTL/bitset.h b/external/EASTL/include/EASTL/bitset.h
new file mode 100644
index 00000000..13e12c84
--- /dev/null
+++ b/external/EASTL/include/EASTL/bitset.h
@@ -0,0 +1,2109 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a bitset much like the C++ std::bitset class. +// The primary distinctions between this bitset and std::bitset are: +// - bitset is more efficient than some other std::bitset implementations, +// notably the bitset that comes with Microsoft and other 1st party platforms. +// - bitset is savvy to an environment that doesn't have exception handling, +// as is sometimes the case with console or embedded environments. +// - bitset is savvy to environments in which 'unsigned long' is not the +// most efficient integral data type. std::bitset implementations use +// unsigned long, even if it is an inefficient integer type. +// - bitset removes as much function calls as practical, in order to allow +// debug builds to run closer in speed and code footprint to release builds. +// - bitset doesn't support string functionality. We can add this if +// it is deemed useful. +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_BITSET_H +#define EASTL_BITSET_H + + +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS(); + +#include +#include + +EA_RESTORE_ALL_VC_WARNINGS(); + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS(); + + #include // std::out_of_range, std::length_error. + + EA_RESTORE_ALL_VC_WARNINGS(); +#endif + +EA_DISABLE_VC_WARNING(4127); // Conditional expression is constant + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + // To consider: Enable this for backwards compatibility with any user code that might be using BitsetWordType: + // #define BitsetWordType EASTL_BITSET_WORD_TYPE_DEFAULT + + + /// BITSET_WORD_COUNT + /// + /// Defines the number of words we use, based on the number of bits. + /// nBitCount refers to the number of bits in a bitset. + /// WordType refers to the type of integer word which stores bitet data. By default it is BitsetWordType. + /// + /// Note: for nBitCount == 0, returns 1! + #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the simpler declaration below. + #define BITSET_WORD_COUNT(nBitCount, WordType) (nBitCount == 0 ? 1 : ((nBitCount - 1) / (8 * sizeof(WordType)) + 1)) + #else + #define BITSET_WORD_COUNT(nBitCount, WordType) ((nBitCount - 1) / (8 * sizeof(WordType)) + 1) + #endif + + + /// EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING + /// Before GCC 4.7 the '-Warray-bounds' buggy and was very likely to issue false positives for loops that are + /// difficult to evaluate. + /// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=45978 + /// + #if defined(__GNUC__) && (EA_COMPILER_VERSION > 4007) && defined(EA_PLATFORM_ANDROID) // Earlier than GCC 4.7 + #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 1 + #else + #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 0 + #endif + + template + class bitset; + + namespace detail + { + template + struct is_word_type : std::bool_constant && !is_volatile_v && !is_same_v && is_integral_v && is_unsigned_v> {}; + + template + constexpr bool is_word_type_v = is_word_type::value; + + // slices the min(N, UInt) lowest significant bits from value. + template + eastl::enable_if_t> from_unsigned_integral(bitset& bs, UInt value) + { + constexpr size_t numWords = (N > 0) ? 
((N - 1) / (CHAR_BIT * sizeof(WordType)) + 1) : 0; // BITSET_WORD_COUNT(N, WordType) but 0 for N == 0 + + WordType* data = bs.data(); + + EA_CONSTEXPR_IF (numWords > 0) + { + // copy everything from value into our word array: + constexpr size_t bytes_to_copy = eastl::min_alt(numWords * sizeof(WordType), sizeof(UInt)); + memcpy(data, &value, bytes_to_copy); + + // zero any remaining elements in our array: + memset(reinterpret_cast(data) + bytes_to_copy, 0, numWords * sizeof(WordType) - bytes_to_copy); + + // we may have copied bits into the final element that are unusable (ie. bit positions > N). + // zero these bits out, as this is an invariant for our implementation. + EA_CONSTEXPR_IF (N % (CHAR_BIT * sizeof(WordType)) != 0) + { + constexpr WordType lastElemUsedBitsMask = (WordType(1) << (N % (CHAR_BIT * sizeof(WordType)))) - 1; + data[numWords - 1] &= lastElemUsedBitsMask; + } + } + else + { + data[0] = 0; // our bitset implementation has a single element even when N == 0. + } + } + + template + eastl::enable_if_t, UInt> to_unsigned_integral(const bitset& bs) + { + constexpr size_t numWords = (N > 0) ? ((N - 1) / (CHAR_BIT * sizeof(WordType)) + 1) : 0; // BITSET_WORD_COUNT(N, WordType) but 0 for N == 0 + + EA_CONSTEXPR_IF (numWords > 0) + { + const WordType* data = bs.data(); + + UInt result = 0; + + size_t numWordsCopied; + EA_CONSTEXPR_IF (sizeof(UInt) < sizeof(WordType)) + { + constexpr size_t bytes_to_copy = sizeof(UInt); + memcpy(&result, data, bytes_to_copy); + + // check remaining uncopied bits from the first word are zero: + constexpr WordType lastElemOverflowBitsMask = static_cast(~((WordType(1) << (CHAR_BIT * sizeof(UInt))) - 1)); + if ((data[0] & lastElemOverflowBitsMask) != 0) + { +#if EASTL_EXCEPTIONS_ENABLED + throw std::overflow_error("target type cannot represent the full bitset."); +#elif EASTL_ASSERT_ENABLED + EA_CONSTEXPR_IF(bAssertOnOverflow) + EASTL_FAIL_MSG("overflow_error"); +#endif + } + + numWordsCopied = 1; + } + else + { + constexpr size_t bytes_to_copy = eastl::min_alt(numWords * sizeof(WordType), sizeof(UInt)); + memcpy(&result, data, bytes_to_copy); + + numWordsCopied = bytes_to_copy / sizeof(WordType); + } + + // check any remaining uncopied words are zero (don't contain any useful information). + for (size_t wordIndex = numWordsCopied; wordIndex < numWords; ++wordIndex) + { + if (data[wordIndex] != 0) + { +#if EASTL_EXCEPTIONS_ENABLED + throw std::overflow_error("target type cannot represent the full bitset."); +#elif EASTL_ASSERT_ENABLED + EA_CONSTEXPR_IF (bAssertOnOverflow) + EASTL_FAIL_MSG("overflow_error"); +#endif + } + } + + return result; + } + else + { + return 0; + } + } + } // namespace detail + + /// BitsetBase + /// + /// This is a default implementation that works for any number of words. + /// + template // Templated on the number of words used to hold the bitset and the word type. + struct BitsetBase + { + typedef WordType word_type; + typedef BitsetBase this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7))))) + }; + + public: + // invariant: we keep any high bits in the last word that are unneeded set to 0 + // so that our to_ulong() conversion can simply copy the words into the target type. 
+ word_type mWord[NW]; + + public: + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + word_type& DoGetWord(size_type i); + word_type DoGetWord(size_type i) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns NW * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns NW * kBitsPerWord (the bit count) if no bits are set. + + }; // class BitsetBase + + + + /// BitsetBase<1, WordType> + /// + /// This is a specialization for a bitset that fits within one word. + /// + template + struct BitsetBase<1, WordType> + { + typedef WordType word_type; + typedef BitsetBase<1, WordType> this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7))))) + }; + + public: + word_type mWord[1]; // Defined as an array of 1 so that bitset can treat this BitsetBase like others. + + public: + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + word_type& DoGetWord(size_type); + word_type DoGetWord(size_type) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set. + + }; // BitsetBase<1, WordType> + + + + /// BitsetBase<2, WordType> + /// + /// This is a specialization for a bitset that fits within two words. + /// The difference here is that we avoid branching (ifs and loops). + /// + template + struct BitsetBase<2, WordType> + { + typedef WordType word_type; + typedef BitsetBase<2, WordType> this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 
6 : 7))))) + }; + + public: + word_type mWord[2]; + + public: + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + word_type& DoGetWord(size_type); + word_type DoGetWord(size_type) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set. + + }; // BitsetBase<2, WordType> + + + + + /// bitset + /// + /// Implements a bitset much like the C++ std::bitset. + /// + /// As of this writing we don't implement a specialization of bitset<0>, + /// as it is deemed an academic exercise that nobody would actually + /// use and it would increase code space and provide little practical + /// benefit. Note that this doesn't mean bitset<0> isn't supported; + /// it means that our version of it isn't as efficient as it would be + /// if a specialization was made for it. + /// + /// - N can be any unsigned (non-zero) value, though memory usage is + /// linear with respect to N, so large values of N use large amounts of memory. + /// - WordType must be a non-cv qualified unsigned integral other than bool. + /// By default the WordType is the largest native register type that the + /// target platform supports. + /// + template + class bitset : private BitsetBase + { + public: + static_assert(detail::is_word_type_v, "Word type must be a non-cv qualified, unsigned integral other than bool."); + + typedef BitsetBase base_type; + typedef bitset this_type; + typedef WordType word_type; + typedef typename base_type::size_type size_type; + + enum + { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7))))), + kSize = N, // The number of bits the bitset holds + kWordSize = sizeof(word_type), // The size of individual words the bitset uses to hold the bits. + kWordCount = BITSET_WORD_COUNT(N, WordType) // The number of words the bitset uses to hold the bits. sizeof(bitset) == kWordSize * kWordCount. + }; + + // internal implementation details. do not use. + using base_type::mWord; + using base_type::DoGetWord; + using base_type::DoFindFirst; + using base_type::DoFindNext; + using base_type::DoFindLast; + using base_type::DoFindPrev; + + using base_type::count; + using base_type::any; + + public: + /// reference + /// + /// A reference is a reference to a specific bit in the bitset. + /// The C++ standard specifies that this be a nested class, + /// though it is not clear if a non-nested reference implementation + /// would be non-conforming. + /// + class reference + { + protected: + friend class bitset; + + word_type* mpBitWord; + size_type mnBitIndex; + + reference(){} // The C++ standard specifies that this is private. + + public: + reference(const bitset& x, size_type i); + + reference& operator=(bool value); + reference& operator=(const reference& x); + + bool operator~() const; + operator bool() const // Defined inline because CodeWarrior fails to be able to compile it outside. 
+ { return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) != 0; } + + reference& flip(); + }; + + public: + friend class reference; + + bitset(); + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_SEPT) + // note: this constructor will only copy the minimum of N or unsigned long long's size least significant bits. + bitset(unsigned long long value); +#else + bitset(uint32_t value); +#endif + + // We don't define copy constructor and operator= because + // the compiler-generated versions will suffice. + + this_type& operator&=(const this_type& x); + this_type& operator|=(const this_type& x); + this_type& operator^=(const this_type& x); + + this_type& operator<<=(size_type n); + this_type& operator>>=(size_type n); + + this_type& set(); + this_type& set(size_type i, bool value = true); + + this_type& reset(); + this_type& reset(size_type i); + + this_type& flip(); + this_type& flip(size_type i); + this_type operator~() const; + + reference operator[](size_type i); + bool operator[](size_type i) const; + + const word_type* data() const; + word_type* data(); + + // Deprecated: use the bitset(unsigned long long) constructor instead. + // this was a workaround for when our constructor was defined as bitset(uint32_t) and could cause a narrowing conversion. + EASTL_REMOVE_AT_2024_SEPT void from_uint32(uint32_t value); + EASTL_REMOVE_AT_2024_SEPT void from_uint64(uint64_t value); + + /// to_xxx() + /// + /// Not recommended: Use one of + /// as_xxx() which is a compile time error if the target type cannot represent the entire bitset, or + /// to_xxx_assert_convertible() which is the standard conformant version of this function, or + /// to_xxx_no_assert_convertible() which has the same behaviour, explicit naming + /// + /// Different from the standard: + /// Does *NOT* assert that the bitset can be represented as the target integer type (has bits set outside the target type). + /// However, if exceptions are enabled, it does throw an exception if the bitset cannot be represented as the target integer type. + unsigned long to_ulong() const; + uint32_t to_uint32() const; + uint64_t to_uint64() const; + + /// to_xxx_assert_convertible() + /// + /// Equivalent to the standard library's to_ulong() / to_ullong(). + /// Asserts / throws an exception if the bitset cannot be represented as the target integer type. + uint32_t to_uint32_assert_convertible() const { return detail::to_unsigned_integral(*this); } + uint64_t to_uint64_assert_convertible() const { return detail::to_unsigned_integral(*this); } + unsigned long to_ulong_assert_convertible() const { return detail::to_unsigned_integral(*this); } + unsigned long long to_ullong_assert_convertible() const { return detail::to_unsigned_integral(*this); } + + /// to_xxx_no_assert_convertible() + /// + /// Prefer to_xxx_assert_convertible() instead of these functions. + /// + /// Different from the standard: + /// Does *NOT* assert that the bitset can be represented as the target integer type (has bits set outside the target type). + /// However, if exceptions are enabled, it does throw an exception if the bitset cannot be represented as the target integer type. 
+ uint32_t to_uint32_no_assert_convertible() const { return detail::to_unsigned_integral(*this); } + uint64_t to_uint64_no_assert_convertible() const { return detail::to_unsigned_integral(*this); } + unsigned long to_ulong_no_assert_convertible() const { return detail::to_unsigned_integral(*this); } + unsigned long long to_ullong_no_assert_convertible() const { return detail::to_unsigned_integral(*this); } + + /// as_uint() / as_xxx() + /// + /// Extension to the standard: Cast to a unsigned integral that can represent the entire bitset. + /// If the target type cannot represent the entire bitset, then issue a compile error (overload does not exist). + /// Never throws / asserts. + template + eastl::enable_if_t && N <= (CHAR_BIT * sizeof(UInt)), UInt> as_uint() const noexcept { return detail::to_unsigned_integral(*this); } + + template + eastl::enable_if_t as_uint32() const noexcept { return to_uint32_assert_convertible(); } + template + eastl::enable_if_t as_uint64() const noexcept { return to_uint64_assert_convertible(); } + template + eastl::enable_if_t as_ulong() const noexcept { return to_ulong_assert_convertible(); } + template + eastl::enable_if_t as_ullong() const noexcept { return to_ullong_assert_convertible(); } + + //size_type count() const; // We inherit this from the base class. + size_type size() const; + + bool operator==(const this_type& x) const; +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + bool operator!=(const this_type& x) const; +#endif + + bool test(size_type i) const; + //bool any() const; // We inherit this from the base class. + bool all() const; + bool none() const; + + this_type operator<<(size_type n) const; + this_type operator>>(size_type n) const; + + // Finds the index of the first "on" bit, returns kSize if none are set. + size_type find_first() const; + + // Finds the index of the next "on" bit after last_find, returns kSize if none are set. + size_type find_next(size_type last_find) const; + + // Finds the index of the last "on" bit, returns kSize if none are set. + size_type find_last() const; + + // Finds the index of the last "on" bit before last_find, returns kSize if none are set. + size_type find_prev(size_type last_find) const; + + }; // bitset + + + + + + + + /// BitsetCountBits + /// + /// This is a fast trick way to count bits without branches nor memory accesses. + /// + template + eastl::enable_if_t && sizeof(UInt64) == 8, uint32_t> BitsetCountBits(UInt64 x) + { + // GCC 3.x's implementation of UINT64_C is broken and fails to deal with + // the code below correctly. So we make a workaround for it. Earlier and + // later versions of GCC don't have this bug. 
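+		// How the bit trick below works: it is a SWAR (SIMD-within-a-register) population count.
+		// The first line sums adjacent bit pairs into 2-bit counts, the second line sums those into
+		// 4-bit counts, the third folds them into per-byte counts, and the final multiply by
+		// 0x0101010101010101 accumulates all the byte counts into the top byte, which the right
+		// shift by 56 then extracts as the total number of set bits.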
+ + #if defined(__GNUC__) && (__GNUC__ == 3) + x = x - ((x >> 1) & 0x5555555555555555ULL); + x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); + x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return (uint32_t)((x * 0x0101010101010101ULL) >> 56); + #else + x = x - ((x >> 1) & UINT64_C(0x5555555555555555)); + x = (x & UINT64_C(0x3333333333333333)) + ((x >> 2) & UINT64_C(0x3333333333333333)); + x = (x + (x >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F); + return (uint32_t)((x * UINT64_C(0x0101010101010101)) >> 56); + #endif + } + + template + eastl::enable_if_t && sizeof(UInt32) == 4, uint32_t> BitsetCountBits(UInt32 x) + { + x = x - ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + return (uint32_t)((x * 0x01010101) >> 24); + } + + template + eastl::enable_if_t< detail::is_word_type_v && sizeof(SmallUInt) < 4, uint32_t> BitsetCountBits(SmallUInt x) + { + return BitsetCountBits((uint32_t)x); + } + + + // const static char kBitsPerUint16[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; + #define EASTL_BITSET_COUNT_STRING "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4" + + + template + eastl::enable_if_t && sizeof(UInt8) == 1, uint32_t> GetFirstBit(UInt8 x) + { + if(x) + { + uint32_t n = 1; + + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (uint32_t)(n - (x & 1)); + } + + return 8; + } + + // To do: Update this to use VC++ _BitScanForward, _BitScanForward64; + // GCC __builtin_ctz, __builtin_ctzl. + // VC++ __lzcnt16, __lzcnt, __lzcnt64 requires recent CPUs (2013+) and probably can't be used. + // http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29#New_features + template + eastl::enable_if_t && sizeof(UInt16) == 2, uint32_t> GetFirstBit(UInt16 x) + { + if(x) + { + uint32_t n = 1; + + if((x & 0x000000FF) == 0) { n += 8; x >>= 8; } + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (uint32_t)(n - (x & 1)); + } + + return 16; + } + + template + eastl::enable_if_t && sizeof(UInt32) == 4, uint32_t> GetFirstBit(UInt32 x) + { +#if defined(EA_COMPILER_MSVC) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + // This has been benchmarked as significantly faster than the generic code below. + unsigned char isNonZero; + unsigned long index; + isNonZero = _BitScanForward(&index, x); + return isNonZero ? (int)index : 32; +#elif (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && !defined(EA_COMPILER_EDG) + if (x) + return __builtin_ctz(x); + return 32; +#else + if(x) + { + uint32_t n = 1; + + if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; } + if((x & 0x000000FF) == 0) { n += 8; x >>= 8; } + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (n - (x & 1)); + } + + return 32; +#endif + } + + template + eastl::enable_if_t && sizeof(UInt64) == 8, uint32_t> GetFirstBit(UInt64 x) + { +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86_64) + // This has been benchmarked as significantly faster than the generic code below. + unsigned char isNonZero; + unsigned long index; + isNonZero = _BitScanForward64(&index, x); + return isNonZero ? 
(int)index : 64; +#elif (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && !defined(EA_COMPILER_EDG) + if (x) + return __builtin_ctzll(x); + return 64; +#else + if(x) + { + uint32_t n = 1; + + if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; } + if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; } + if((x & 0x000000FF) == 0) { n += 8; x >>= 8; } + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (n - ((uint32_t)x & 1)); + } + + return 64; +#endif + } + + + #if EASTL_INT128_SUPPORTED + inline uint32_t GetFirstBit(eastl_uint128_t x) + { + if(x) + { + uint32_t n = 1; + + if((x & UINT64_C(0xFFFFFFFFFFFFFFFF)) == 0) { n += 64; x >>= 64; } + if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; } + if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; } + if((x & 0x000000FF) == 0) { n += 8; x >>= 8; } + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (n - ((uint32_t)x & 1)); + } + + return 128; + } + #endif + + template + eastl::enable_if_t && sizeof(UInt8) == 1, uint32_t> GetLastBit(UInt8 x) + { + if(x) + { + uint32_t n = 0; + + if(x & 0xFFF0) { n += 4; x >>= 4; } + if(x & 0xFFFC) { n += 2; x >>= 2; } + if(x & 0xFFFE) { n += 1; } + + return n; + } + + return 8; + } + + template + eastl::enable_if_t && sizeof(UInt16) == 2, uint32_t> GetLastBit(UInt16 x) + { + if(x) + { + uint32_t n = 0; + + if(x & 0xFF00) { n += 8; x >>= 8; } + if(x & 0xFFF0) { n += 4; x >>= 4; } + if(x & 0xFFFC) { n += 2; x >>= 2; } + if(x & 0xFFFE) { n += 1; } + + return n; + } + + return 16; + } + + template + eastl::enable_if_t && sizeof(UInt32) == 4, uint32_t> GetLastBit(UInt32 x) + { +#if defined(EA_COMPILER_MSVC) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + // This has been benchmarked as significantly faster than the generic code below. + unsigned char isNonZero; + unsigned long index; + isNonZero = _BitScanReverse(&index, x); + return isNonZero ? (int)index : 32; +#elif (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && !defined(EA_COMPILER_EDG) + if (x) + return 31 - __builtin_clz(x); + return 32; +#else + if(x) + { + uint32_t n = 0; + + if(x & 0xFFFF0000) { n += 16; x >>= 16; } + if(x & 0xFFFFFF00) { n += 8; x >>= 8; } + if(x & 0xFFFFFFF0) { n += 4; x >>= 4; } + if(x & 0xFFFFFFFC) { n += 2; x >>= 2; } + if(x & 0xFFFFFFFE) { n += 1; } + + return n; + } + + return 32; +#endif + } + + template + eastl::enable_if_t && sizeof(UInt64) == 8, uint32_t> GetLastBit(UInt64 x) + { +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86_64) + // This has been benchmarked as significantly faster than the generic code below. + unsigned char isNonZero; + unsigned long index; + isNonZero = _BitScanReverse64(&index, x); + return isNonZero ? (int)index : 64; +#elif (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && !defined(EA_COMPILER_EDG) + if (x) + return 63 - __builtin_clzll(x); + return 64; +#else + if(x) + { + uint32_t n = 0; + + if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; } + if(x & 0xFFFF0000) { n += 16; x >>= 16; } + if(x & 0xFFFFFF00) { n += 8; x >>= 8; } + if(x & 0xFFFFFFF0) { n += 4; x >>= 4; } + if(x & 0xFFFFFFFC) { n += 2; x >>= 2; } + if(x & 0xFFFFFFFE) { n += 1; } + + return n; + } + + return 64; +#endif + } + + #if EASTL_INT128_SUPPORTED + inline uint32_t GetLastBit(eastl_uint128_t x) + { + if(x) + { + uint32_t n = 0; + + eastl_uint128_t mask(UINT64_C(0xFFFFFFFFFFFFFFFF)); // There doesn't seem to exist compiler support for INT128_C() by any compiler. 
EAStdC's int128_t supports it though. + mask <<= 64; + + if(x & mask) { n += 64; x >>= 64; } + if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; } + if(x & UINT64_C(0x00000000FFFF0000)) { n += 16; x >>= 16; } + if(x & UINT64_C(0x00000000FFFFFF00)) { n += 8; x >>= 8; } + if(x & UINT64_C(0x00000000FFFFFFF0)) { n += 4; x >>= 4; } + if(x & UINT64_C(0x00000000FFFFFFFC)) { n += 2; x >>= 2; } + if(x & UINT64_C(0x00000000FFFFFFFE)) { n += 1; } + + return n; + } + + return 128; + } + #endif + + + + + /////////////////////////////////////////////////////////////////////////// + // BitsetBase + // + // We tried two forms of array access here: + // for(word_type *pWord(mWord), *pWordEnd(mWord + NW); pWord < pWordEnd; ++pWord) + // *pWord = ... + // and + // for(size_t i = 0; i < NW; i++) + // mWord[i] = ... + // + // For our tests (~NW < 16), the latter (using []) access resulted in faster code. + /////////////////////////////////////////////////////////////////////////// + + + template + inline void BitsetBase::operator&=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] &= x.mWord[i]; + } + + + template + inline void BitsetBase::operator|=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] |= x.mWord[i]; + } + + + template + inline void BitsetBase::operator^=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] ^= x.mWord[i]; + } + + + template + inline void BitsetBase::operator<<=(size_type n) + { + const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); + + if(nWordShift) + { + for(int i = (int)(NW - 1); i >= 0; --i) + mWord[i] = (nWordShift <= (size_type)i) ? mWord[i - nWordShift] : (word_type)0; + } + + if(n &= kBitsPerWordMask) + { + for(size_t i = (NW - 1); i > 0; --i) + mWord[i] = (word_type)((mWord[i] << n) | (mWord[i - 1] >> (kBitsPerWord - n))); + mWord[0] <<= n; + } + + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::operator>>=(size_type n) + { + const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); + + if(nWordShift) + { + for(size_t i = 0; i < NW; ++i) + mWord[i] = ((nWordShift < (NW - i)) ? mWord[i + nWordShift] : (word_type)0); + } + + if(n &= kBitsPerWordMask) + { + for(size_t i = 0; i < (NW - 1); ++i) + mWord[i] = (word_type)((mWord[i] >> n) | (mWord[i + 1] << (kBitsPerWord - n))); + mWord[NW - 1] >>= n; + } + } + + + template + inline void BitsetBase::flip() + { + for(size_t i = 0; i < NW; i++) + mWord[i] = ~mWord[i]; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::set() + { + for(size_t i = 0; i < NW; i++) + mWord[i] = static_cast(~static_cast(0)); + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::set(size_type i, bool value) + { + if(value) + mWord[i >> kBitsPerWordShift] |= (static_cast(1) << (i & kBitsPerWordMask)); + else + mWord[i >> kBitsPerWordShift] &= ~(static_cast(1) << (i & kBitsPerWordMask)); + } + + + template + inline void BitsetBase::reset() + { + if(NW > 16) // This is a constant expression and should be optimized away. + { + // This will be fastest if compiler intrinsic function optimizations are enabled. 
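+			// Illustrative note: for a bitset<2048> over 64-bit words, NW is 32 and
+			// sizeof(mWord) is 256 bytes, so this clears the whole array in one call.
+			// Compilers typically lower a constant-size memset like this into a few
+			// wide stores rather than an actual library call.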
+ memset(mWord, 0, sizeof(mWord)); + } + else + { + for(size_t i = 0; i < NW; i++) + mWord[i] = 0; + } + } + + + template + inline bool BitsetBase::operator==(const this_type& x) const + { + for(size_t i = 0; i < NW; i++) + { + if(mWord[i] != x.mWord[i]) + return false; + } + return true; + } + + + template + inline bool BitsetBase::any() const + { + for(size_t i = 0; i < NW; i++) + { + if(mWord[i]) + return true; + } + return false; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::count() const + { + size_type n = 0; + + for(size_t i = 0; i < NW; i++) + { + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + n += (size_type)__builtin_popcountl(mWord[i]); + #else + n += (size_type)__builtin_popcountll(mWord[i]); + #endif + #elif defined(__GNUC__) && (__GNUC__ < 3) + n += BitsetCountBits(mWord[i]); // GCC 2.x compiler inexplicably blows up on the code below. + #else + // todo: use __popcnt16, __popcnt, __popcnt64 for msvc builds + // https://msdn.microsoft.com/en-us/library/bb385231(v=vs.140).aspx + for(word_type w = mWord[i]; w; w >>= 4) + n += EASTL_BITSET_COUNT_STRING[w & 0xF]; + + // Version which seems to run slower in benchmarks: + // n += BitsetCountBits(mWord[i]); + #endif + + } + return n; + } + + + template + inline typename BitsetBase::word_type& + BitsetBase::DoGetWord(size_type i) + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase::word_type + BitsetBase::DoGetWord(size_type i) const + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindFirst() const + { + for(size_type word_index = 0; word_index < NW; ++word_index) + { + const size_type fbiw = GetFirstBit(mWord[word_index]); + + if(fbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + fbiw; + } + + return (size_type)NW * kBitsPerWord; + } + + +#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING +EA_DISABLE_GCC_WARNING(-Warray-bounds) +#endif + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindNext(size_type last_find) const + { + // Start looking from the next bit. + ++last_find; + + // Set initial state based on last find. + size_type word_index = static_cast(last_find >> kBitsPerWordShift); + size_type bit_index = static_cast(last_find & kBitsPerWordMask); + + // To do: There probably is a more elegant way to write looping below. + if(word_index < NW) + { + // Mask off previous bits of the word so our search becomes a "find first". 
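+			// Worked example (illustrative): if the previous find returned bit 5 of
+			// word 0, last_find was incremented to 6 above, so bit_index == 6 and the
+			// mask below is ~0 << 6 == ...11000000 in binary. ANDing with it clears
+			// bits 0..5, so GetFirstBit then reports the first set bit at index >= 6.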
+ word_type this_word = mWord[word_index] & (static_cast(~0) << bit_index); + + for(;;) + { + const size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + fbiw; + + if(++word_index < NW) + this_word = mWord[word_index]; + else + break; + } + } + + return (size_type)NW * kBitsPerWord; + } + +#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING +EA_RESTORE_GCC_WARNING() +#endif + + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindLast() const + { + for(size_type word_index = (size_type)NW; word_index > 0; --word_index) + { + const size_type lbiw = GetLastBit(mWord[word_index - 1]); + + if(lbiw != kBitsPerWord) + return ((word_index - 1) * kBitsPerWord) + lbiw; + } + + return (size_type)NW * kBitsPerWord; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindPrev(size_type last_find) const + { + if(last_find > 0) + { + // Set initial state based on last find. + size_type word_index = static_cast(last_find >> kBitsPerWordShift); + size_type bit_index = static_cast(last_find & kBitsPerWordMask); + + // Mask off subsequent bits of the word so our search becomes a "find last". + // We do two shifts here because it's undefined behaviour to right shift greater than or equal to the number of bits in the integer. + // + // Note: operator~() is an arithmetic operator and performs integral promotions, ie. small integrals are promoted to an int. + // Because the promotion is before applying operator~() we need to cast back to our word type otherwise we end up with extraneous set bits. + word_type mask = (static_cast(~static_cast(0)) >> (kBitsPerWord - 1 - bit_index)) >> 1; + word_type this_word = mWord[word_index] & mask; + + for(;;) + { + const size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + lbiw; + + if(word_index > 0) + this_word = mWord[--word_index]; + else + break; + } + } + + return (size_type)NW * kBitsPerWord; + } + + + + /////////////////////////////////////////////////////////////////////////// + // BitsetBase<1, WordType> + /////////////////////////////////////////////////////////////////////////// + + + template + inline void BitsetBase<1, WordType>::operator&=(const this_type& x) + { + mWord[0] &= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator|=(const this_type& x) + { + mWord[0] |= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator^=(const this_type& x) + { + mWord[0] ^= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator<<=(size_type n) + { + mWord[0] <<= n; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<1, WordType>::operator>>=(size_type n) + { + mWord[0] >>= n; + } + + + template + inline void BitsetBase<1, WordType>::flip() + { + mWord[0] = ~mWord[0]; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<1, WordType>::set() + { + mWord[0] = static_cast(~static_cast(0)); + // We let the parent class turn off any upper bits. 
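+		// Illustrative note: "the parent class" is bitset<N, WordType>, which
+		// re-clears the unused high bits of the last word after operations like
+		// this one. E.g. a bitset<20> over a 32-bit word sets all 32 bits here,
+		// and the derived class then zeroes bits 20..31 again.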
+ } + + + template + inline void BitsetBase<1, WordType>::set(size_type i, bool value) + { + if(value) + mWord[0] |= (static_cast(1) << i); + else + mWord[0] &= ~(static_cast(1) << i); + } + + + template + inline void BitsetBase<1, WordType>::reset() + { + mWord[0] = 0; + } + + + template + inline bool BitsetBase<1, WordType>::operator==(const this_type& x) const + { + return mWord[0] == x.mWord[0]; + } + + + template + inline bool BitsetBase<1, WordType>::any() const + { + return mWord[0] != 0; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::count() const + { + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + return (size_type)__builtin_popcountl(mWord[0]); + #else + return (size_type)__builtin_popcountll(mWord[0]); + #endif + #elif defined(__GNUC__) && (__GNUC__ < 3) + return BitsetCountBits(mWord[0]); // GCC 2.x compiler inexplicably blows up on the code below. + #else + size_type n = 0; + for(word_type w = mWord[0]; w; w >>= 4) + n += EASTL_BITSET_COUNT_STRING[w & 0xF]; + return n; + #endif + } + + + template + inline typename BitsetBase<1, WordType>::word_type& + BitsetBase<1, WordType>::DoGetWord(size_type) + { + return mWord[0]; + } + + + template + inline typename BitsetBase<1, WordType>::word_type + BitsetBase<1, WordType>::DoGetWord(size_type) const + { + return mWord[0]; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindFirst() const + { + return GetFirstBit(mWord[0]); + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindNext(size_type last_find) const + { + if(++last_find < kBitsPerWord) + { + // Mask off previous bits of word so our search becomes a "find first". + const word_type this_word = mWord[0] & (static_cast(~0) << last_find); + + return GetFirstBit(this_word); + } + + return kBitsPerWord; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindLast() const + { + return GetLastBit(mWord[0]); + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindPrev(size_type last_find) const + { + if(last_find > 0) + { + // Mask off previous bits of word so our search becomes a "find first". 
+ const word_type this_word = mWord[0] & (static_cast(~static_cast(0)) >> (kBitsPerWord - last_find)); + + return GetLastBit(this_word); + } + + return kBitsPerWord; + } + + + + + /////////////////////////////////////////////////////////////////////////// + // BitsetBase<2, WordType> + /////////////////////////////////////////////////////////////////////////// + + + template + inline void BitsetBase<2, WordType>::operator&=(const this_type& x) + { + mWord[0] &= x.mWord[0]; + mWord[1] &= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator|=(const this_type& x) + { + mWord[0] |= x.mWord[0]; + mWord[1] |= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator^=(const this_type& x) + { + mWord[0] ^= x.mWord[0]; + mWord[1] ^= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator<<=(size_type n) + { + if(n) // to avoid a shift by kBitsPerWord, which is undefined + { + if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle high bits and n >= 64 + { + mWord[1] = mWord[0]; + mWord[0] = 0; + n -= kBitsPerWord; + } + + mWord[1] = (mWord[1] << n) | (mWord[0] >> (kBitsPerWord - n)); // Intentionally use | instead of +. + mWord[0] <<= n; + // We let the parent class turn off any upper bits. + } + } + + + template + inline void BitsetBase<2, WordType>::operator>>=(size_type n) + { + if(n) // to avoid a shift by kBitsPerWord, which is undefined + { + if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle n >= 64 + { + mWord[0] = mWord[1]; + mWord[1] = 0; + n -= kBitsPerWord; + } + + mWord[0] = (mWord[0] >> n) | (mWord[1] << (kBitsPerWord - n)); // Intentionally use | instead of +. + mWord[1] >>= n; + } + } + + + template + inline void BitsetBase<2, WordType>::flip() + { + mWord[0] = ~mWord[0]; + mWord[1] = ~mWord[1]; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<2, WordType>::set() + { + EA_DISABLE_VC_WARNING(4245); // '=': conversion from 'int' to 'unsigned short', signed/unsigned mismatch + // https://learn.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-4-c4245?view=msvc-170 + // MSVC incorrectly believes 0 is a negative value. + mWord[0] = ~static_cast(0); + mWord[1] = ~static_cast(0); + EA_RESTORE_VC_WARNING(); + // We let the parent class turn off any upper bits. 
+ } + + + template + inline void BitsetBase<2, WordType>::set(size_type i, bool value) + { + if(value) + mWord[i >> kBitsPerWordShift] |= (static_cast(1) << (i & kBitsPerWordMask)); + else + mWord[i >> kBitsPerWordShift] &= ~(static_cast(1) << (i & kBitsPerWordMask)); + } + + + template + inline void BitsetBase<2, WordType>::reset() + { + mWord[0] = 0; + mWord[1] = 0; + } + + + template + inline bool BitsetBase<2, WordType>::operator==(const this_type& x) const + { + return (mWord[0] == x.mWord[0]) && (mWord[1] == x.mWord[1]); + } + + + template + inline bool BitsetBase<2, WordType>::any() const + { + // Or with two branches: { return (mWord[0] != 0) || (mWord[1] != 0); } + return (mWord[0] | mWord[1]) != 0; + } + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::count() const + { + #if (defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304)) || defined(__clang__) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + return (size_type)__builtin_popcountl(mWord[0]) + (size_type)__builtin_popcountl(mWord[1]); + #else + return (size_type)__builtin_popcountll(mWord[0]) + (size_type)__builtin_popcountll(mWord[1]); + #endif + + #else + return BitsetCountBits(mWord[0]) + BitsetCountBits(mWord[1]); + #endif + } + + + template + inline typename BitsetBase<2, WordType>::word_type& + BitsetBase<2, WordType>::DoGetWord(size_type i) + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase<2, WordType>::word_type + BitsetBase<2, WordType>::DoGetWord(size_type i) const + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindFirst() const + { + size_type fbiw = GetFirstBit(mWord[0]); + + if(fbiw != kBitsPerWord) + return fbiw; + + fbiw = GetFirstBit(mWord[1]); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindNext(size_type last_find) const + { + // If the last find was in the first word, we must check it and then possibly the second. + if(++last_find < (size_type)kBitsPerWord) + { + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[0] & (static_cast(~0) << last_find); + + // Step through words. + size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return fbiw; + + fbiw = GetFirstBit(mWord[1]); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + } + else if(last_find < (size_type)(2 * kBitsPerWord)) + { + // The last find was in the second word, remove the bit count of the first word from the find. + last_find -= kBitsPerWord; + + // Mask off previous bits of word so our search becomes a "find first". 
+ word_type this_word = mWord[1] & (static_cast(~0) << last_find); + + const size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + } + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindLast() const + { + size_type lbiw = GetLastBit(mWord[1]); + + if(lbiw != kBitsPerWord) + return kBitsPerWord + lbiw; + + lbiw = GetLastBit(mWord[0]); + + if(lbiw != kBitsPerWord) + return lbiw; + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindPrev(size_type last_find) const + { + // If the last find was in the second word, we must check it and then possibly the first. + if(last_find > (size_type)kBitsPerWord) + { + // This has the same effect as last_find %= kBitsPerWord in our case. + last_find -= kBitsPerWord; + + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[1] & (static_cast(~static_cast(0)) >> (kBitsPerWord - last_find)); + + // Step through words. + size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return kBitsPerWord + lbiw; + + lbiw = GetLastBit(mWord[0]); + + if(lbiw != kBitsPerWord) + return lbiw; + } + else if(last_find != 0) + { + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[0] & (static_cast(~static_cast(0)) >> (kBitsPerWord - last_find)); + + const size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return lbiw; + } + + return 2 * kBitsPerWord; + } + + + + /////////////////////////////////////////////////////////////////////////// + // bitset::reference + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset::reference::reference(const bitset& x, size_type i) + : mpBitWord(&const_cast(x).DoGetWord(i)), + mnBitIndex(i & kBitsPerWordMask) + { // We have an issue here because the above is casting away the const-ness of the source bitset. + // Empty + } + + + template + inline typename bitset::reference& + bitset::reference::operator=(bool value) + { + if(value) + *mpBitWord |= (static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + else + *mpBitWord &= ~(static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + return *this; + } + + + template + inline typename bitset::reference& + bitset::reference::operator=(const reference& x) + { + if(*x.mpBitWord & (static_cast(1) << (x.mnBitIndex & kBitsPerWordMask))) + *mpBitWord |= (static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + else + *mpBitWord &= ~(static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + return *this; + } + + + template + inline bool bitset::reference::operator~() const + { + return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) == 0; + } + + + //Defined inline in the class because Metrowerks fails to be able to compile it here. 
+ //template + //inline bitset::reference::operator bool() const + //{ + // return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) != 0; + //} + + + template + inline typename bitset::reference& + bitset::reference::flip() + { + *mpBitWord ^= static_cast(1) << (mnBitIndex & kBitsPerWordMask); + return *this; + } + + + + + /////////////////////////////////////////////////////////////////////////// + // bitset + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset::bitset() + { + reset(); + } + + EA_DISABLE_VC_WARNING(6313) +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_SEPT) + template + inline bitset::bitset(unsigned long long value) + { + detail::from_unsigned_integral(*this, value); + } +#else + template + inline bitset::bitset(uint32_t value) + { + detail::from_unsigned_integral(*this, value); + } +#endif + EA_RESTORE_VC_WARNING() + + + template + inline typename bitset::this_type& + bitset::operator&=(const this_type& x) + { + base_type::operator&=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator|=(const this_type& x) + { + base_type::operator|=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator^=(const this_type& x) + { + base_type::operator^=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator<<=(size_type n) + { + if(EASTL_LIKELY((intptr_t)n < (intptr_t)N)) + { + EA_DISABLE_VC_WARNING(6313) + base_type::operator<<=(n); + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. + EA_RESTORE_VC_WARNING() + } + else + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator>>=(size_type n) + { + if(EASTL_LIKELY(n < N)) + base_type::operator>>=(n); + else + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::set() + { + base_type::set(); // This sets all bits. + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. 
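+		// Worked example (illustrative): for N == 100 with 64-bit words,
+		// N & kBitsPerWordMask == 36, so the mask keeps bits 0..35 of mWord[1] and
+		// zeroes its 28 unused high bits. For N == 128 the condition above is false
+		// and no masking is needed, because every bit of the last word is in use.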
+ return *this; + } + + + template + inline typename bitset::this_type& + bitset::set(size_type i, bool value) + { + if(i < N) + base_type::set(i, value); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::set -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::set"); + #endif + } + + return *this; + } + + + template + inline typename bitset::this_type& + bitset::reset() + { + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::reset(size_type i) + { + if(EASTL_LIKELY(i < N)) + DoGetWord(i) &= ~(static_cast(1) << (i & kBitsPerWordMask)); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::reset -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::reset"); + #endif + } + + return *this; + } + + + template + inline typename bitset::this_type& + bitset::flip() + { + EA_DISABLE_VC_WARNING(6313) + base_type::flip(); + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. + return *this; + EA_RESTORE_VC_WARNING() + } + + + template + inline typename bitset::this_type& + bitset::flip(size_type i) + { + if(EASTL_LIKELY(i < N)) + DoGetWord(i) ^= (static_cast(1) << (i & kBitsPerWordMask)); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::flip -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::flip"); + #endif + } + return *this; + } + + + template + inline typename bitset::this_type + bitset::operator~() const + { + return this_type(*this).flip(); + } + + + template + inline typename bitset::reference + bitset::operator[](size_type i) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::operator[] -- out of range"); + #endif + + return reference(*this, i); + } + + + template + inline bool bitset::operator[](size_type i) const + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::operator[] -- out of range"); + #endif + + return (DoGetWord(i) & (static_cast(1) << (i & kBitsPerWordMask))) != 0; + } + + + template + inline const typename bitset::word_type* bitset::data() const + { + return base_type::mWord; + } + + + template + inline typename bitset::word_type* bitset::data() + { + return base_type::mWord; + } + + + template + inline void bitset::from_uint32(uint32_t value) + { + detail::from_unsigned_integral(*this, value); + } + + + template + inline void bitset::from_uint64(uint64_t value) + { + detail::from_unsigned_integral(*this, value); + } + + + template + inline unsigned long bitset::to_ulong() const + { + return detail::to_unsigned_integral(*this); + } + + + template + inline uint32_t bitset::to_uint32() const + { + return detail::to_unsigned_integral(*this); + } + + + template + inline uint64_t bitset::to_uint64() const + { + return detail::to_unsigned_integral(*this); + } + + + // template + // inline typename bitset::size_type + // bitset::count() const + // { + // return base_type::count(); + // } + + + template + inline typename bitset::size_type + bitset::size() const + { + return (size_type)N; + } + + + 
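+	// Usage sketch (illustrative only, not part of the upstream header). The
+	// find_first/find_next extensions let callers walk the set bits directly
+	// instead of testing every index; the values below are hypothetical.
+	//
+	//     eastl::bitset<128> bits;
+	//     bits.set(3);
+	//     bits.set(70);
+	//     for(size_t i = bits.find_first(); i < bits.size(); i = bits.find_next(i))
+	//         printf("bit %u is set\n", (unsigned)i); // prints 3, then 70
+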
template + inline bool bitset::operator==(const this_type& x) const + { + return base_type::operator==(x); + } + +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + template + inline bool bitset::operator!=(const this_type& x) const + { + return !base_type::operator==(x); + } +#endif + + template + inline bool bitset::test(size_type i) const + { + if(EASTL_UNLIKELY(i < N)) + return (DoGetWord(i) & (static_cast(1) << (i & kBitsPerWordMask))) != 0; + + #if EASTL_ASSERT_ENABLED + EASTL_FAIL_MSG("bitset::test -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::test"); + #else + return false; + #endif + } + + + // template + // inline bool bitset::any() const + // { + // return base_type::any(); + // } + + + template + inline bool bitset::all() const + { + return count() == size(); + } + + + template + inline bool bitset::none() const + { + return !base_type::any(); + } + + + template + inline typename bitset::this_type + bitset::operator<<(size_type n) const + { + return this_type(*this).operator<<=(n); + } + + + template + inline typename bitset::this_type + bitset::operator>>(size_type n) const + { + return this_type(*this).operator>>=(n); + } + + + template + inline typename bitset::size_type + bitset::find_first() const + { + const size_type i = base_type::DoFindFirst(); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_next(size_type last_find) const + { + const size_type i = base_type::DoFindNext(last_find); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_last() const + { + const size_type i = base_type::DoFindLast(); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_prev(size_type last_find) const + { + const size_type i = base_type::DoFindPrev(last_find); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + + /////////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset operator&(const bitset& a, const bitset& b) + { + // We get betting inlining when we don't declare temporary variables. + return bitset(a).operator&=(b); + } + + + template + inline bitset operator|(const bitset& a, const bitset& b) + { + return bitset(a).operator|=(b); + } + + + template + inline bitset operator^(const bitset& a, const bitset& b) + { + return bitset(a).operator^=(b); + } + + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/bitvector.h b/external/EASTL/include/EASTL/bitvector.h new file mode 100644 index 00000000..b2f3ab5f --- /dev/null +++ b/external/EASTL/include/EASTL/bitvector.h @@ -0,0 +1,1472 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a bit vector, which is essentially a vector of bool but which
+// uses bits instead of bytes. It is thus similar to the original std::vector<bool>.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Note: This code is not yet complete: it isn't tested and doesn't yet
+// support containers other than vector.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_BITVECTOR_H
+#define EASTL_BITVECTOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/vector.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/bitset.h>
+#if EASTL_EXCEPTIONS_ENABLED
+#include <stdexcept>
+#endif
+
+EA_DISABLE_VC_WARNING(4480); // nonstandard extension used: specifying underlying type for enum
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+	/// EASTL_BITVECTOR_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	///
+	#ifndef EASTL_BITVECTOR_DEFAULT_NAME
+		#define EASTL_BITVECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " bitvector" // Unless the user overrides something, this is "EASTL bitvector".
+	#endif
+
+	/// EASTL_BITVECTOR_DEFAULT_ALLOCATOR
+	///
+	#ifndef EASTL_BITVECTOR_DEFAULT_ALLOCATOR
+		#define EASTL_BITVECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_BITVECTOR_DEFAULT_NAME)
+	#endif
+
+
+
+	/// BitvectorWordType
+	/// Defines the integral data type used by bitvector.
+	typedef EASTL_BITSET_WORD_TYPE_DEFAULT BitvectorWordType;
+
+
+	template <typename Element>
+	class bitvector_const_iterator;
+
+
+	template <typename Element>
+	class bitvector_reference
+	{
+	public:
+		typedef eastl_size_t size_type;
+		bitvector_reference(Element* ptr, eastl_size_t i);
+		bitvector_reference(const bitvector_reference& other);
+
+		bitvector_reference& operator=(bool value);
+		bitvector_reference& operator=(const bitvector_reference& rhs);
+
+		operator bool() const // Defined here because some compilers fail otherwise.
+			{ return (*mpBitWord & (Element(1) << mnBitIndex)) != 0; }
+
+	protected:
+		friend class bitvector_const_iterator<Element>;
+
+		Element* mpBitWord;
+		size_type mnBitIndex;
+
+		bitvector_reference() {}
+		void CopyFrom(const bitvector_reference& rhs);
+	};
+
+
+
+	template <typename Element>
+	class bitvector_const_iterator
+	{
+	public:
+		typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+		typedef bitvector_const_iterator<Element>        this_type;
+		typedef bool                                     value_type;
+		typedef bitvector_reference<Element>             reference_type;
+		typedef ptrdiff_t                                difference_type;
+		typedef Element                                  element_type;
+		typedef element_type*                            pointer;   // This is wrong. It needs to be something that acts as a pointer to a bit.
+		typedef element_type&                            reference; // This is not right. It needs to be something that acts as a pointer to a bit.
+ typedef eastl_size_t size_type; + + protected: + reference_type mReference; + + enum + { + kBitCount = (8 * sizeof(Element)) + }; + + public: + bool operator*() const; + bool operator[](difference_type n) const; + + bitvector_const_iterator(); + bitvector_const_iterator(const element_type* p, eastl_size_t i); + bitvector_const_iterator(const reference_type& referenceType); + bitvector_const_iterator(const bitvector_const_iterator& other); + + bitvector_const_iterator& operator++(); + bitvector_const_iterator operator++(int); + bitvector_const_iterator& operator--(); + bitvector_const_iterator operator--(int); + + bitvector_const_iterator& operator+=(difference_type dist); + bitvector_const_iterator& operator-=(difference_type dist); + bitvector_const_iterator operator+ (difference_type dist) const; + bitvector_const_iterator operator- (difference_type dist) const; + + difference_type operator-(const this_type& rhs) const; + + bitvector_const_iterator& operator= (const this_type& rhs); + + bool operator==(const this_type& rhs) const; + bool operator!=(const this_type& rhs) const; + + bool operator< (const this_type& rhs) const; + bool operator<=(const this_type& rhs) const; + bool operator> (const this_type& rhs) const; + bool operator>=(const this_type& rhs) const; + + int validate(const element_type* pStart, const element_type* pEnd, eastl_size_t nExtraBits) const; + + protected: + template + friend class bitvector; + + reference_type& get_reference_type() { return mReference; } + }; + + + + template + class bitvector_iterator : public bitvector_const_iterator + { + public: + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef bitvector_iterator this_type; + typedef bitvector_const_iterator base_type; + typedef bool value_type; + typedef bitvector_reference reference_type; + typedef ptrdiff_t difference_type; + typedef Element element_type; + typedef element_type* pointer; // This is wrong. It needs to be someting that acts as a pointer to a bit. + typedef element_type& reference; // This is not right. It needs to be someting that acts as a pointer to a bit. + + public: + reference_type operator*() const; + reference_type operator[](difference_type n) const; + + bitvector_iterator(); + bitvector_iterator(element_type* p, eastl_size_t i); + bitvector_iterator(reference_type& referenceType); + + bitvector_iterator& operator++() { base_type::operator++(); return *this; } + bitvector_iterator& operator--() { base_type::operator--(); return *this; } + bitvector_iterator operator++(int); + bitvector_iterator operator--(int); + + bitvector_iterator& operator+=(difference_type dist) { base_type::operator+=(dist); return *this; } + bitvector_iterator& operator-=(difference_type dist) { base_type::operator-=(dist); return *this; } + bitvector_iterator operator+ (difference_type dist) const; + bitvector_iterator operator- (difference_type dist) const; + + // We need this here because we are overloading operator-, so for some reason the + // other overload of the function can't be found unless it's explicitly specified. + difference_type operator-(const base_type& rhs) const { return base_type::operator-(rhs); } + }; + + + + /// bitvector + /// + /// Implements an array of bits treated as boolean values. + /// bitvector is similar to vector but uses bits instead of bytes and + /// allows the user to use other containers such as deque instead of vector. 
+ /// bitvector is different from bitset in that bitset is less flexible but + /// uses less memory and has higher performance. + /// + /// To consider: Rename the Element template parameter to WordType, for + /// consistency with bitset. + /// + template > + class bitvector + { + public: + typedef bitvector this_type; + typedef bool value_type; + typedef bitvector_reference reference; + typedef bool const_reference; + typedef bitvector_iterator iterator; + typedef bitvector_const_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef Allocator allocator_type; + typedef Element element_type; + typedef Container container_type; + typedef eastl_size_t size_type; + typedef ptrdiff_t difference_type; + + #if defined(_MSC_VER) && (_MSC_VER >= 1400) && (_MSC_VER <= 1600) && !EASTL_STD_CPP_ONLY // _MSC_VER of 1400 means VS2005, 1600 means VS2010. VS2012 generates errors with usage of enum:size_type. + enum : size_type { // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this. + npos = container_type::npos, + kMaxSize = container_type::kMaxSize + }; + #else + static const size_type npos = container_type::npos; /// 'npos' means non-valid position or simply non-position. + static const size_type kMaxSize = container_type::kMaxSize; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues. + #endif + + enum + { + kBitCount = 8 * sizeof(Element) + }; + + protected: + container_type mContainer; + size_type mFreeBitCount; // Unused bits in the last word of mContainer. + + public: + bitvector(); + explicit bitvector(const allocator_type& allocator); + explicit bitvector(size_type n, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR); + bitvector(size_type n, value_type value, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR); + + template + bitvector(InputIterator first, InputIterator last); + + void swap(this_type& x); + + template + void assign(InputIterator first, InputIterator last); + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + size_type capacity() const EA_NOEXCEPT; + + void resize(size_type n, value_type value); + void resize(size_type n); + void reserve(size_type n); + void set_capacity(size_type n = npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size. 
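+		// Illustrative note: capacity requests round up to whole words. With a
+		// 64-bit BitvectorWordType, reserve(100) allocates two words and capacity()
+		// then reports 128; set_capacity(npos) trims the allocation down to size().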
+
+		void push_back();
+		void push_back(value_type value);
+		void pop_back();
+
+		reference front();
+		const_reference front() const;
+		reference back();
+		const_reference back() const;
+
+		bool test(size_type n, bool defaultValue) const; // Returns true if the bit index is < size() and set. Returns defaultValue if the bit is >= size().
+		void set(size_type n, bool value);               // Resizes the container to accommodate n if necessary.
+
+		reference at(size_type n);                       // throws an out_of_range exception if n is invalid.
+		const_reference at(size_type n) const;
+
+		reference operator[](size_type n);               // behavior is undefined if n is invalid.
+		const_reference operator[](size_type n) const;
+
+		/*
+		Work in progress:
+		template <bool value = true> iterator       find_first();                       // Finds the lowest "on" bit.
+		template <bool value = true> iterator       find_next(const_iterator it);       // Finds the next lowest "on" bit after it.
+		template <bool value = true> iterator       find_last();                        // Finds the index of the last "on" bit, returns size if none are set.
+		template <bool value = true> iterator       find_prev(const_iterator it);       // Finds the index of the last "on" bit before last_find, returns size if none are set.
+
+		template <bool value = true> const_iterator find_first() const;                 // Finds the lowest "on" bit.
+		template <bool value = true> const_iterator find_next(const_iterator it) const; // Finds the next lowest "on" bit after it.
+		template <bool value = true> const_iterator find_last() const;                  // Finds the index of the last "on" bit, returns size if none are set.
+		template <bool value = true> const_iterator find_prev(const_iterator it) const; // Finds the index of the last "on" bit before last_find, returns size if none are set.
+		*/
+
+		element_type*       data() EA_NOEXCEPT;
+		const element_type* data() const EA_NOEXCEPT;
+
+		iterator insert(const_iterator position, value_type value);
+		void     insert(const_iterator position, size_type n, value_type value);
+
+		// template <typename InputIterator> Not yet implemented. See below for disabled definition.
+		// void insert(const_iterator position, InputIterator first, InputIterator last);
+
+		iterator erase(const_iterator position);
+		iterator erase(const_iterator first, const_iterator last);
+
+		reverse_iterator erase(const_reverse_iterator position);
+		reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+		void clear();
+		void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
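+
+		// Usage sketch (illustrative only, not part of the upstream header;
+		// values are hypothetical):
+		//
+		//     eastl::bitvector<> flags;
+		//     flags.resize(100, false);        // 100 bits; two words when Element is 64-bit
+		//     flags.set(200, true);            // set() grows the container to 201 bits
+		//     bool b = flags.test(500, false); // out-of-range reads return the default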
+ + container_type& get_container(); + const container_type& get_container() const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + }; + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_reference + /////////////////////////////////////////////////////////////////////// + + template + bitvector_reference::bitvector_reference(Element* p, eastl_size_t i) + : mpBitWord(p), + mnBitIndex(i) + { + } + + + template + bitvector_reference::bitvector_reference(const bitvector_reference& other) + : mpBitWord(other.mpBitWord), + mnBitIndex(other.mnBitIndex) + { + } + + + template + bitvector_reference& + bitvector_reference::operator=(bool value) + { + const Element mask = (Element)(Element(1) << mnBitIndex); + + if(value) + *mpBitWord |= mask; + else + *mpBitWord &= ~mask; + + return *this; + } + + + template + bitvector_reference& + bitvector_reference::operator=(const bitvector_reference& rhs) + { + return (*this = (bool)rhs); + } + + + template + void bitvector_reference::CopyFrom(const bitvector_reference& rhs) + { + mpBitWord = rhs.mpBitWord; + mnBitIndex = rhs.mnBitIndex; + } + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_const_iterator + /////////////////////////////////////////////////////////////////////// + + template + bitvector_const_iterator::bitvector_const_iterator() + : mReference(0, 0) + { + } + + + template + bitvector_const_iterator::bitvector_const_iterator(const Element* p, eastl_size_t i) + : mReference(const_cast(p), i) // const_cast is safe here because we never let mReference leak and we don't modify it. + { + } + + + template + bitvector_const_iterator::bitvector_const_iterator(const reference_type& reference) + : mReference(reference) + { + } + + + template + bitvector_const_iterator::bitvector_const_iterator(const bitvector_const_iterator& other) + : mReference(other.mReference) + { + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator++() + { + ++mReference.mnBitIndex; + + if(mReference.mnBitIndex == kBitCount) + { + ++mReference.mpBitWord; + mReference.mnBitIndex = 0; + } + + return *this; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator--() + { + if(mReference.mnBitIndex == 0) + { + --mReference.mpBitWord; + mReference.mnBitIndex = kBitCount; + } + + --mReference.mnBitIndex; + return *this; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator++(int) + { + bitvector_const_iterator copy(*this); + ++*this; + return copy; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator--(int) + { + bitvector_const_iterator copy(*this); + --*this; + return copy; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator+=(difference_type n) + { + n += mReference.mnBitIndex; + + if(n >= difference_type(0)) + { + mReference.mpBitWord += n / kBitCount; + mReference.mnBitIndex = (size_type)(n % kBitCount); + } + else + { + // backwards is tricky + // figure out how many full words backwards we need to move + // n = [-1..-32] => 1 + // n = [-33..-64] => 2 + const size_type backwards = (size_type)(-n + kBitCount - 1); + mReference.mpBitWord -= backwards / kBitCount; + + // -1 => 31; backwards = 32; 31 - (backwards % 32) = 31 + // -2 => 30; backwards = 33; 31 - (backwards % 32) = 30 + // -3 => 29; backwards = 34 + // .. 
+ // -32 => 0; backwards = 63; 31 - (backwards % 32) = 0 + // -33 => 31; backwards = 64; 31 - (backwards % 32) = 31 + mReference.mnBitIndex = (kBitCount - 1) - (backwards % kBitCount); + } + + return *this; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator-=(difference_type n) + { + return (*this += -n); + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator+(difference_type n) const + { + bitvector_const_iterator copy(*this); + copy += n; + return copy; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator-(difference_type n) const + { + bitvector_const_iterator copy(*this); + copy -= n; + return copy; + } + + + template + typename bitvector_const_iterator::difference_type + bitvector_const_iterator::operator-(const this_type& rhs) const + { + return ((mReference.mpBitWord - rhs.mReference.mpBitWord) * kBitCount) + mReference.mnBitIndex - rhs.mReference.mnBitIndex; + } + + + template + bool bitvector_const_iterator::operator==(const this_type& rhs) const + { + return (mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex == rhs.mReference.mnBitIndex); + } + + + template + bool bitvector_const_iterator::operator!=(const this_type& rhs) const + { + return !(*this == rhs); + } + + + template + bool bitvector_const_iterator::operator<(const this_type& rhs) const + { + return (mReference.mpBitWord < rhs.mReference.mpBitWord) || + ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex < rhs.mReference.mnBitIndex)); + } + + + template + bool bitvector_const_iterator::operator<=(const this_type& rhs) const + { + return (mReference.mpBitWord < rhs.mReference.mpBitWord) || + ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex <= rhs.mReference.mnBitIndex)); + } + + + template + bool bitvector_const_iterator::operator>(const this_type& rhs) const + { + return !(*this <= rhs); + } + + + template + bool bitvector_const_iterator::operator>=(const this_type& rhs) const + { + return !(*this < rhs); + } + + + template + bool bitvector_const_iterator::operator*() const + { + return mReference; + } + + + template + bool bitvector_const_iterator::operator[](difference_type n) const + { + return *(*this + n); + } + + + template + bitvector_const_iterator& bitvector_const_iterator::operator= (const this_type& rhs) + { + mReference.CopyFrom(rhs.mReference); + return *this; + } + + + template + int bitvector_const_iterator::validate(const Element* pStart, const Element* pEnd, eastl_size_t nExtraBits) const + { + const Element* const pCurrent = mReference.mpBitWord; + + if(pCurrent >= pStart) + { + if(nExtraBits == 0) + { + if(pCurrent == pEnd && mReference) + return eastl::isf_valid | eastl::isf_current; + else if(pCurrent < pEnd) + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + else if(pCurrent == (pEnd - 1)) + { + const size_type bit = mReference.mnBitIndex; + const size_type lastbit = kBitCount - nExtraBits; + + if(bit == lastbit) + return eastl::isf_valid | eastl::isf_current; + else if(bit < lastbit) + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + else if(pCurrent < pEnd) + { + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + } + + return eastl::isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_iterator + /////////////////////////////////////////////////////////////////////// + + template + 
bitvector_iterator::bitvector_iterator() + : base_type() + { + } + + template + bitvector_iterator::bitvector_iterator(Element* p, eastl_size_t i) + : base_type(p, i) + { + } + + + template + bitvector_iterator::bitvector_iterator(reference_type& reference) + : base_type(reference) + { + } + + + template + typename bitvector_iterator::reference_type + bitvector_iterator::operator*() const + { + return base_type::mReference; + } + + + template + typename bitvector_iterator::reference_type + bitvector_iterator::operator[](difference_type n) const + { + return *(*this + n); + } + + + template + void MoveBits(bitvector_iterator start, + bitvector_iterator end, + bitvector_iterator dest) + { + // Slow implemenation; could optimize by moving a word at a time. + if(dest <= start) + { + while(start != end) + { + *dest = *start; + ++dest; + ++start; + } + } + else + { + // Need to move backwards + dest += (end - start); + + while(start != end) + { + --dest; + --end; + *dest = *end; + } + } + } + + + template + bitvector_iterator + bitvector_iterator::operator++(int) + { + bitvector_iterator copy(*this); + ++*this; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator--(int) + { + bitvector_iterator copy(*this); + --*this; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator+(difference_type n) const + { + bitvector_iterator copy(*this); + copy += n; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator-(difference_type n) const + { + bitvector_iterator copy(*this); + copy -= n; + return copy; + } + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector + /////////////////////////////////////////////////////////////////////// + + template + template + void bitvector::assign(InputIterator first, InputIterator last) + { + // To consider: We can maybe specialize this on bitvector_iterator to do a fast bitwise copy. + // We can also specialize for random access iterators to figure out the size & reserve first. 
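+			// Illustrative sketch of the random access case mentioned above: the
+			// size is knowable up front, so the loop below could be preceded by
+			//     reserve((size_type)eastl::distance(first, last));
+			// to avoid repeated reallocation during the per-bit push_back calls.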
+ + clear(); + + while(first != last) + { + push_back(*first); + ++first; + } + } + + + template + typename bitvector::iterator + bitvector::begin() EA_NOEXCEPT + { + return iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::const_iterator + bitvector::begin() const EA_NOEXCEPT + { + return const_iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::const_iterator + bitvector::cbegin() const EA_NOEXCEPT + { + return const_iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::iterator + bitvector::end() EA_NOEXCEPT + { + return iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + typename bitvector::const_iterator + bitvector::end() const EA_NOEXCEPT + { + return const_iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + typename bitvector::const_iterator + bitvector::cend() const EA_NOEXCEPT + { + return const_iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + bool bitvector::empty() const EA_NOEXCEPT + { + return mContainer.empty(); + } + + + template + typename bitvector::size_type + bitvector::size() const EA_NOEXCEPT + { + return (mContainer.size() * kBitCount) - mFreeBitCount; + } + + + template + typename bitvector::size_type + bitvector::capacity() const EA_NOEXCEPT + { + return mContainer.capacity() * kBitCount; + } + + + template + void bitvector::set_capacity(size_type n) + { + if(n == npos) + mContainer.set_capacity(npos); + else + mContainer.set_capacity((n + kBitCount - 1) / kBitCount); + } + + + template + typename bitvector::reverse_iterator + bitvector::rbegin() EA_NOEXCEPT + { + return reverse_iterator(end()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(end()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(end()); + } + + + template + typename bitvector::reverse_iterator + bitvector::rend() EA_NOEXCEPT + { + return reverse_iterator(begin()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(begin()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(begin()); + } + + + template + typename bitvector::reference + bitvector::front() + { + EASTL_ASSERT(!empty()); + return reference(&mContainer[0], 0); + } + + + template + typename bitvector::const_reference + bitvector::front() const + { + EASTL_ASSERT(!empty()); + + // To consider: make a better solution to this than const_cast. 
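+		// Illustrative note: const_reference is plain bool, so the proxy built
+		// below is read once and never written through; the const_cast exists only
+		// because the bitvector_reference constructor takes a non-const Element*.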
+ return reference(const_cast(&mContainer[0]), 0); + } + + + template + typename bitvector::reference + bitvector::back() + { + EASTL_ASSERT(!empty()); + return *(--end()); + } + + + template + typename bitvector::const_reference + bitvector::back() const + { + EASTL_ASSERT(!empty()); + return *(--end()); + } + + + template + void bitvector::push_back() + { + if(!mFreeBitCount) + { + mContainer.push_back(); + mFreeBitCount = kBitCount; + } + + --mFreeBitCount; + } + + + template + void bitvector::push_back(value_type value) + { + push_back(); + *--end() = value; + } + + + template + void bitvector::pop_back() + { + EASTL_ASSERT(!empty()); + + if(++mFreeBitCount == kBitCount) + { + mContainer.pop_back(); + mFreeBitCount = 0; + } + } + + + template + void bitvector::reserve(size_type n) + { + const size_type wordCount = (n + kBitCount - 1) / kBitCount; + mContainer.reserve(wordCount); + } + + + template + void bitvector::resize(size_type n) + { + const size_type wordCount = (n + kBitCount - 1) / kBitCount; + const size_type extra = (wordCount * kBitCount) - n; + + mContainer.resize(wordCount); + mFreeBitCount = extra; + } + + + template + void bitvector::resize(size_type n, value_type value) + { + const size_type s = size(); + if(n < s) + resize(n); + + // Fill up to the end of a word + size_type newbits = n - s; + + while(mFreeBitCount && newbits) + { + push_back(value); + --newbits; + } + + // Fill the rest a word at a time + if(newbits) + { + element_type element(0); + if(value) + element = ~element; + + const size_type words = (n + kBitCount - 1) / kBitCount; + const size_type extra = words * kBitCount - n; + mContainer.resize(words, element); + mFreeBitCount = extra; + } + } + + + template + bool bitvector::test(size_type n, bool defaultValue) const + { + if(n < size()) + return *(begin() + (difference_type)n); + + return defaultValue; + } + + + template + void bitvector::set(size_type n, bool value) + { + if(EASTL_UNLIKELY(n >= size())) + resize(n + 1); + + *(begin() + (difference_type)n) = value; + } + + + template + typename bitvector::reference + bitvector::at(size_type n) + { + // The difference between at and operator[] is that at signals + // if the requested position is out of range by throwing an + // out_of_range exception. 
+ + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(n >= size())) + throw std::out_of_range("bitvector::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(n >= size())) + EASTL_FAIL_MSG("bitvector::at -- out of range"); + #endif + + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::const_reference + bitvector::at(size_type n) const + { + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(n >= size())) + throw std::out_of_range("bitvector::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(n >= size())) + EASTL_FAIL_MSG("bitvector::at -- out of range"); + #endif + + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::reference + bitvector::operator[](size_type n) + { + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::const_reference + bitvector::operator[](size_type n) const + { + return *(begin() + (difference_type)n); + } + + +/* + template + template + typename bitvector::iterator + bitvector::find_first() + { + return begin(); + } + + template iterator find_next(const_iterator it); + template iterator find_last(); + template iterator find_prev(const_iterator it); + + template const_iterator find_first() const; + template const_iterator find_next(const_iterator it) const; + template const_iterator find_last() const; + template const_iterator find_prev(const_iterator it) const; +*/ + + + + + template + inline typename bitvector::container_type& + bitvector::get_container() + { + return mContainer; + } + + + template + inline const typename bitvector::container_type& + bitvector::get_container() const + { + return mContainer; + } + + + template + bool bitvector::validate() const + { + if(!mContainer.validate()) + return false; + + if((unsigned)mFreeBitCount >= kBitCount) + return false; + + return true; + } + + + template + int bitvector::validate_iterator(const_iterator i) const + { + return i.validate(mContainer.begin(), mContainer.end(), mFreeBitCount); + } + + + template + typename bitvector::element_type* + bitvector::data() EA_NOEXCEPT + { + return mContainer.data(); + } + + + template + const typename bitvector::element_type* + bitvector::data() const EA_NOEXCEPT + { + return mContainer.data(); + } + + + template + typename bitvector::iterator + bitvector::insert(const_iterator position, value_type value) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::insert -- invalid iterator"); + #endif + + // Save because we might reallocate + const typename iterator::difference_type n = iPosition - begin(); + push_back(); + iPosition = begin() + n; + + MoveBits(iPosition, --end(), ++iterator(iPosition)); + *iPosition = value; + + return iPosition; + } + + + template + void bitvector::insert(const_iterator position, size_type n, value_type value) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::insert -- invalid iterator"); + #endif + + // Save because we might reallocate. 
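+		// Illustrative note: the resize() below can reallocate the underlying
+		// vector, which would leave iPosition's cached word pointer dangling. The
+		// bit offset p is saved first so the iterator can be rebuilt from begin().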
+ const typename iterator::difference_type p = iPosition - begin(); + resize(size() + n); + iPosition = begin() + p; + + iterator insert_end = iPosition + n; + MoveBits(iPosition, end() - n, insert_end); + + // To do: Optimize this to word-at-a-time for large inserts + while(iPosition != insert_end) + { + *iPosition = value; + ++iPosition; + } + } + + + /* + The following is a placeholder for a future implementation. It turns out that a correct implementation of + insert(pos, first, last) is a non-trivial exercise that would take a few hours to implement and test. + The reasons why involve primarily the problem of handling the case where insertion source comes from + within the container itself, and the case that first and last (note they are templated) might not refer + to iterators might refer to a value/count pair. The C++ Standard requires you to handle this case and + I (Paul Pedriana) believe that it applies even for a bitvector, given that bool is an integral type. + So you have to set up a compile-time type traits function chooser. See vector, for example. + + template + template + void bitvector::insert(const_iterator position, InputIterator first, InputIterator last) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + // This implementation is probably broken due to not handling insertion into self. + // To do: Make a more efficient version of this. + difference_type distance = (iPosition - begin()); + + while(first != last) + { + insert(iPosition, *first); + iPosition = begin() + ++distance; + ++first; + } + } + */ + + + template + typename bitvector::iterator + bitvector::erase(const_iterator position) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_can_dereference) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + MoveBits(++iterator(iPosition), end(), iPosition); + resize(size() - 1); + + // Verify that no reallocation occurred. + EASTL_ASSERT(validate_iterator(iPosition) & eastl::isf_valid); + return iPosition; + } + + + template + typename bitvector::iterator + bitvector::erase(const_iterator first, const_iterator last) + { + iterator iFirst(first.get_reference_type()); // This is just a non-const version of first. + iterator iLast(last.get_reference_type()); // This is just a non-const version of last. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iLast) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + if(!(iFirst == iLast)) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_can_dereference) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + const size_type eraseCount = (size_type)(iLast - iFirst); + MoveBits(iLast, end(), iFirst); + resize(size() - eraseCount); + + // Verify that no reallocation occurred. 
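+			// (erase never grows the container, so iFirst should still refer to valid
+			// storage here; the assertion below merely double-checks that.)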
+ #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + } + + return iFirst; + } + + + template + typename bitvector::reverse_iterator + bitvector::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename bitvector::reverse_iterator + bitvector::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase(last.base(), first.base())); + } + + + template + void bitvector::swap(this_type& rhs) + { + mContainer.swap(rhs.mContainer); + eastl::swap(mFreeBitCount, rhs.mFreeBitCount); + } + + + template + void bitvector::reset_lose_memory() + { + mContainer.reset_lose_memory(); // intentional memory leak. + mFreeBitCount = 0; + } + + + template + void bitvector::clear() + { + mContainer.clear(); + mFreeBitCount = 0; + } + + + template + bitvector::bitvector() + : mContainer(), + mFreeBitCount(0) + { + } + + + template + bitvector::bitvector(const allocator_type& allocator) + : mContainer(allocator), + mFreeBitCount(0) + { + } + + + template + bitvector::bitvector(size_type n, const allocator_type& allocator) + : mContainer((n + kBitCount - 1) / kBitCount, allocator) + { + mFreeBitCount = kBitCount - (n % kBitCount); + + if(mFreeBitCount == kBitCount) + mFreeBitCount = 0; + } + + + template + bitvector::bitvector(size_type n, value_type value, const allocator_type& allocator) + : mContainer((n + kBitCount - 1) / kBitCount, value ? ~element_type(0) : element_type(0), allocator) + { + mFreeBitCount = kBitCount - (n % kBitCount); + + if(mFreeBitCount == kBitCount) + mFreeBitCount = 0; + } + + + template + template + bitvector::bitvector(InputIterator first, InputIterator last) + : mContainer(), + mFreeBitCount(0) + { + assign(first, last); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const bitvector& a, + const bitvector& b) + { + // To do: Replace this with a smart compare implementation. This is much slower than it needs to be. + return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); + } + + + template + inline bool operator!=(const bitvector& a, + const bitvector& b) + { + return !operator==(a, b); + } + + + template + inline bool operator<(const bitvector& a, + const bitvector& b) + { + // To do: Replace this with a smart compare implementation. This is much slower than it needs to be. 
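+		// (One possible "smart" compare, sketched as an assumption only: walk whole
+		// element words via get_container() and fall back to bit-at-a-time for the
+		// final partial word; any such version must respect the bit order within a
+		// word to stay lexicographically correct.)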
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator>(const bitvector& a, + const bitvector& b) + { + return b < a; + } + + + template + inline bool operator<=(const bitvector& a, + const bitvector& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const bitvector& a, + const bitvector& b) + { + return !(a < b); + } + + template + inline void swap(bitvector& a, + bitvector& b) + { + a.swap(b); + } + + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/bonus/adaptors.h b/external/EASTL/include/EASTL/bonus/adaptors.h new file mode 100644 index 00000000..423cacdd --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/adaptors.h @@ -0,0 +1,88 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ADAPTORS_H +#define EASTL_ADAPTORS_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +EA_DISABLE_VC_WARNING(4512 4626) +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ + EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted +#endif + + +namespace eastl +{ + /// reverse + /// + /// This adaptor allows reverse iteration of a container in ranged base for-loops. + /// + /// for (auto& i : reverse(c)) { ... } + /// + template + struct reverse_wrapper + { + template + reverse_wrapper(C&& c) + : mContainer(eastl::forward(c)) + { + /** + * NOTE: + * + * Due to reference collapsing rules of universal references Container type is either + * + * const C& if the input is a const lvalue + * C& if the input is a non-const lvalue + * C if the input is an rvalue + * const C if the input is a const rvalue thus the object will have to be copied and the copy-ctor will be called + * + * + * Thus we either move the whole container into this object or take a reference to the lvalue avoiding the copy. + * The static_assert below ensures this. 
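+			 *
+			 * For example (illustrative only, not part of this header):
+			 *
+			 *     eastl::vector<int> v = {1, 2, 3};
+			 *     for (int& i : eastl::reverse(v)) { }                           // lvalue: Container is eastl::vector<int>&, no copy
+			 *     for (int i : eastl::reverse(eastl::vector<int>{4, 5, 6})) { }  // rvalue: Container is eastl::vector<int>, moved in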
+ */ + static_assert(eastl::is_same_v, "Reference collapsed deduced type must be the same as the deduced Container type!"); + } + + Container mContainer; + }; + + template + auto begin(const reverse_wrapper& w) -> decltype(eastl::rbegin(w.mContainer)) + { + return eastl::rbegin(w.mContainer); + } + + template + auto end(const reverse_wrapper& w) -> decltype(eastl::rend(w.mContainer)) + { + return eastl::rend(w.mContainer); + } + + template + reverse_wrapper reverse(Container&& c) + { + return reverse_wrapper(eastl::forward(c)); + } + +} // namespace eastl + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ + EA_RESTORE_VC_WARNING() +#endif +EA_RESTORE_VC_WARNING() + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/bonus/call_traits.h b/external/EASTL/include/EASTL/bonus/call_traits.h new file mode 100644 index 00000000..0995d051 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/call_traits.h @@ -0,0 +1,117 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// The design for call_traits here is very similar to that found in template +// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that +// these libraries have established this interface as a defacto standard for +// solving this problem. Also, these are described in various books on the +// topic of template metaprogramming, such as "Modern C++ Design". +// +// See http://www.boost.org/libs/utility/call_traits.htm or search for +// call_traits in Google for a description of call_traits. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_CALL_TRAITS_H +#define EASTL_CALL_TRAITS_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + + + template + struct ct_imp2 { typedef const T& param_type; }; + + template + struct ct_imp2 { typedef const T param_type; }; + + template + struct ct_imp { typedef const T& param_type; }; + + template + struct ct_imp { typedef typename ct_imp2::param_type param_type; }; + + template + struct ct_imp { typedef T const param_type; }; + + + + template + struct call_traits + { + public: + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef typename ct_imp::value, is_arithmetic::value>::param_type param_type; + }; + + + template + struct call_traits + { + typedef T& value_type; + typedef T& reference; + typedef const T& const_reference; + typedef T& param_type; + }; + + + template + struct call_traits + { + private: + typedef T array_type[N]; + + public: + typedef const T* value_type; + typedef array_type& reference; + typedef const array_type& const_reference; + typedef const T* const param_type; + }; + + + template + struct call_traits + { + private: + typedef const T array_type[N]; + + public: + typedef const T* value_type; + typedef array_type& reference; + typedef const array_type& const_reference; + typedef const T* const param_type; + }; + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/bonus/compressed_pair.h b/external/EASTL/include/EASTL/bonus/compressed_pair.h new file mode 100644 index 00000000..379642ba --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/compressed_pair.h @@ -0,0 +1,460 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// The compressed pair class is very similar to std::pair, but if either of the +// template arguments are empty classes, then the "empty base-class optimization" +// is applied to compress the size of the pair. +// +// The design for compressed_pair here is very similar to that found in template +// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that +// these libraries have established this interface as a defacto standard for +// solving this problem. Also, these are described in various books on the +// topic of template metaprogramming, such as "Modern C++ Design". 
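+// As a hedged illustration (not from this header): with an empty second type such
+// as eastl::less<int>, a compressed_pair<int*, eastl::less<int>> typically occupies
+// just sizeof(int*) bytes, because the empty type is folded in as a base class
+// rather than stored as a padded member.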
+// +// template +// class compressed_pair +// { +// public: +// typedef T1 first_type; +// typedef T2 second_type; +// typedef typename call_traits::param_type first_param_type; +// typedef typename call_traits::param_type second_param_type; +// typedef typename call_traits::reference first_reference; +// typedef typename call_traits::reference second_reference; +// typedef typename call_traits::const_reference first_const_reference; +// typedef typename call_traits::const_reference second_const_reference; +// +// compressed_pair() : base() {} +// compressed_pair(first_param_type x, second_param_type y); +// explicit compressed_pair(first_param_type x); +// explicit compressed_pair(second_param_type y); +// +// compressed_pair& operator=(const compressed_pair&); +// +// first_reference first(); +// first_const_reference first() const; +// +// second_reference second(); +// second_const_reference second() const; +// +// void swap(compressed_pair& y); +// }; +// +// The two members of the pair can be accessed using the member functions first() +// and second(). Note that not all member functions can be instantiated for all +// template parameter types. In particular compressed_pair can be instantiated for +// reference and array types, however in these cases the range of constructors that +// can be used are limited. If types T1 and T2 are the same type, then there is +// only one version of the single-argument constructor, and this constructor +// initialises both values in the pair to the passed value. +// +// Note that compressed_pair can not be instantiated if either of the template +// arguments is a union type, unless there is compiler support for is_union, +// or if is_union is specialised for the union type. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_COMPRESSED_PAIR_H +#define EASTL_COMPRESSED_PAIR_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + EA_DISABLE_VC_WARNING(4626 5027) // warning C4626: 'eastl::compressed_pair_imp': assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted +#endif + +namespace eastl +{ + + template + class compressed_pair; + + + template + struct compressed_pair_switch; + + template + struct compressed_pair_switch{ static const int value = 0; }; + + template + struct compressed_pair_switch { static const int value = 1; }; + + template + struct compressed_pair_switch { static const int value = 2; }; + + template + struct compressed_pair_switch { static const int value = 3; }; + + template + struct compressed_pair_switch { static const int value = 4; }; + + template + struct compressed_pair_switch { static const int value = 5; }; + + template + class compressed_pair_imp; + + + + template + inline void cp_swap(T& t1, T& t2) + { + T tTemp = t1; + t1 = t2; + t2 = tTemp; + } + + + // Derive from neither + template + class compressed_pair_imp + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : mFirst(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : mFirst(x) {} + + compressed_pair_imp(second_param_type y) + : mSecond(y) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void swap(compressed_pair& y) + { + cp_swap(mFirst, y.first()); + cp_swap(mSecond, y.second()); + } + + private: + first_type mFirst; + second_type mSecond; + }; + + + // Derive from T1 + template + class compressed_pair_imp : private T1 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : first_type(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + compressed_pair_imp(second_param_type y) + : mSecond(y) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void swap(compressed_pair& y) + { + // No need to swap empty base class + cp_swap(mSecond, y.second()); + } + + private: + second_type mSecond; + }; + + + + // Derive from T2 + template + class compressed_pair_imp : private T2 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type 
second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : second_type(y), mFirst(x) {} + + compressed_pair_imp(first_param_type x) + : mFirst(x) {} + + compressed_pair_imp(second_param_type y) + : second_type(y) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + void swap(compressed_pair& y) + { + // No need to swap empty base class + cp_swap(mFirst, y.first()); + } + + private: + first_type mFirst; + }; + + + + // Derive from T1 and T2 + template + class compressed_pair_imp : private T1, private T2 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : first_type(x), second_type(y) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + compressed_pair_imp(second_param_type y) + : second_type(y) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + // No need to swap empty bases + void swap(compressed_pair&) + { } + }; + + + // T1 == T2, T1 and T2 are both empty + // Note does not actually store an instance of T2 at all; + // but reuses T1 base class for both first() and second(). 
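+	// (Consequently sizeof(compressed_pair_imp) is typically 1 -- the minimum
+	// complete-object size -- and first() and second() both alias the single T1
+	// base subobject.)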
+ template + class compressed_pair_imp : private T1 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type) + : first_type(x) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + void swap(compressed_pair&) { } + }; + + + // T1 == T2 and are not empty + template + class compressed_pair_imp + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : mFirst(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : mFirst(x), mSecond(x) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void swap(compressed_pair& y) + { + cp_swap(mFirst, y.first()); + cp_swap(mSecond, y.second()); + } + + private: + first_type mFirst; + second_type mSecond; + }; + + + + template + class compressed_pair + : private compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> + { + private: + typedef compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> base; + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair() : base() {} + compressed_pair(first_param_type x, second_param_type y) : base(x, y) {} + explicit compressed_pair(first_param_type x) : base(x) {} + explicit compressed_pair(second_param_type y) : base(y) {} + + first_reference first() { return base::first(); } + first_const_reference first() const { return base::first(); } + + second_reference second() { return base::second(); } + second_const_reference second() const { return base::second(); } + + void swap(compressed_pair& y) { base::swap(y); } + }; + + + // Partial specialisation for case where T1 == T2: + template + class compressed_pair + : private compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> + { + private: + typedef 
compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> base; + public: + typedef T first_type; + typedef T second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair() : base() {} + compressed_pair(first_param_type x, second_param_type y) : base(x, y) {} + explicit compressed_pair(first_param_type x) : base(x) {} + + first_reference first() { return base::first(); } + first_const_reference first() const { return base::first(); } + + second_reference second() { return base::second(); } + second_const_reference second() const { return base::second(); } + + void swap(compressed_pair& y) { base::swap(y); } + }; + + + template + inline void swap(compressed_pair& x, compressed_pair& y) + { + x.swap(y); + } + + +} // namespace eastl + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + EA_RESTORE_VC_WARNING() +#endif + +#endif // Header include guard + + + diff --git a/external/EASTL/include/EASTL/bonus/fixed_ring_buffer.h b/external/EASTL/include/EASTL/bonus/fixed_ring_buffer.h new file mode 100644 index 00000000..2bb54e47 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/fixed_ring_buffer.h @@ -0,0 +1,50 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXED_RING_BUFFER_H +#define EASTL_FIXED_RING_BUFFER_H + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +namespace eastl +{ + + /// fixed_ring_buffer + /// + /// This is a convenience template alias for creating a fixed-sized + /// ring_buffer using eastl::fixed_vector as its storage container. This has + /// been tricky for users to get correct due to the constructor requirements + /// of eastl::ring_buffer leaking the implementation detail of the sentinel + /// value being used internally. In addition, it was not obvious what the + /// correct allocator_type template parameter should be used for containers + /// providing both a default allocator type and an overflow allocator type. + /// + /// We are over-allocating the fixed_vector container to accommodate the + /// ring_buffer sentinel to prevent that implementation detail leaking into + /// user code. 
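+	/// (Concretely: a fixed_ring_buffer of N usable elements is backed by a
+	/// fixed_vector of N + 1 elements, because ring_buffer keeps one slot as a
+	/// sentinel that distinguishes a full buffer from an empty one.)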
+ /// + /// Example usage: + /// + /// fixed_ring_buffer rb = {0, 1, 2, 3, 4, 5, 6, 7}; + /// or + /// fixed_ring_buffer rb(8); // capacity doesn't need to respect sentinel + /// rb.push_back(0); + /// + /// +#if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using fixed_ring_buffer = + ring_buffer, typename fixed_vector::overflow_allocator_type>; +#endif + +} // namespace eastl + +#endif // Header include guard + diff --git a/external/EASTL/include/EASTL/bonus/fixed_tuple_vector.h b/external/EASTL/include/EASTL/bonus/fixed_tuple_vector.h new file mode 100644 index 00000000..e9ce0ec0 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/fixed_tuple_vector.h @@ -0,0 +1,210 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXEDTUPLEVECTOR_H +#define EASTL_FIXEDTUPLEVECTOR_H + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +namespace eastl +{ + + /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME + #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_tuple_vector" // Unless the user overrides something, this is "EASTL fixed_vector". + #endif + + + /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR + #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME) + #endif + +// External interface of fixed_tuple_vector +template +class fixed_tuple_vector : public TupleVecInternal::TupleVecImpl::GetTotalAllocationSize(nodeCount, 0), 1, + TupleVecInternal::TupleRecurser::GetTotalAlignment(), 0, + bEnableOverflow, EASTLAllocatorType>, make_index_sequence, Ts...> +{ +public: + typedef fixed_vector_allocator< + TupleVecInternal::TupleRecurser::GetTotalAllocationSize(nodeCount, 0), 1, + TupleVecInternal::TupleRecurser::GetTotalAlignment(), 0, + bEnableOverflow, EASTLAllocatorType> fixed_allocator_type; + typedef aligned_buffer aligned_buffer_type; + typedef fixed_tuple_vector this_type; + typedef EASTLAllocatorType overflow_allocator_type; + + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + typedef typename base_type::size_type size_type; + +private: + aligned_buffer_type mBuffer; + +public: + fixed_tuple_vector() + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { } + + fixed_tuple_vector(const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { } + + fixed_tuple_vector(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::get_allocator().copy_overflow_allocator(x.get_allocator()); + base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end())); + x.clear(); + } + + fixed_tuple_vector(this_type&& x, const overflow_allocator_type& allocator) + : 
base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end())); + x.clear(); + } + + fixed_tuple_vector(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::get_allocator().copy_overflow_allocator(x.get_allocator()); + base_type::DoInitFromIterator(x.begin(), x.end()); + } + + fixed_tuple_vector(const this_type& x, const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(x.begin(), x.end()); + } + + template + fixed_tuple_vector(move_iterator begin, move_iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(begin, end); + } + + template + fixed_tuple_vector(Iterator begin, Iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(begin, end); + } + + fixed_tuple_vector(size_type n, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitDefaultFill(n); + } + + fixed_tuple_vector(size_type n, const Ts&... args) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillArgs(n, args...); + } + + fixed_tuple_vector(size_type n, const Ts&... 
args, const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillArgs(n, args...); + } + + fixed_tuple_vector(size_type n, + typename base_type::const_reference_tuple tup, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillTuple(n, tup); + } + + fixed_tuple_vector(const typename base_type::value_tuple* first, const typename base_type::value_tuple* last, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromTupleArray(first, last); + } + + fixed_tuple_vector(std::initializer_list iList, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromTupleArray(iList.begin(), iList.end()); + } + + this_type& operator=(const this_type& other) + { + base_type::operator=(other); + return *this; + } + + this_type& operator=(this_type&& other) + { + base_type::clear(); + // OK to call DoInitFromIterator in a non-ctor scenario because clear() reset everything, more-or-less + base_type::DoInitFromIterator(make_move_iterator(other.begin()), make_move_iterator(other.end())); + other.clear(); + return *this; + } + + this_type& operator=(std::initializer_list iList) + { + base_type::operator=(iList); + return *this; + } + + void swap(this_type& x) + { + // If both containers are using the heap instead of local memory + // then we can do a fast pointer swap instead of content swap. + if ((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator())) + { + base_type::swap(x); + } + else + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + } + + // Returns the max fixed size, which is the user-supplied nodeCount parameter. + size_type max_size() const { return nodeCount; } + // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, + // the container size can be greater than nodeCount but full() could return true because the + // fixed space may have a recently freed slot. + bool full() const { return (base_type::mNumElements >= nodeCount) || ((void*)base_type::mpData != (void*)mBuffer.buffer); } + // Returns true if the allocations spilled over into the overflow allocator. Meaningful + // only if overflow is enabled. + bool has_overflowed() const { return ((void*)base_type::mpData != (void*)mBuffer.buffer); } + // Returns the value of the bEnableOverflow template parameter. 
+ bool can_overflow() const { return bEnableOverflow; } + + const overflow_allocator_type& get_overflow_allocator() const { return base_type::get_allocator().get_overflow_allocator(); } +}; + + +template +inline void swap(fixed_tuple_vector& a, + fixed_tuple_vector& b) +{ + a.swap(b); +} + + +} // namespace eastl + +#endif // EASTL_TUPLEVECTOR_H diff --git a/external/EASTL/include/EASTL/bonus/intrusive_sdlist.h b/external/EASTL/include/EASTL/bonus/intrusive_sdlist.h new file mode 100644 index 00000000..ab51f1b4 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/intrusive_sdlist.h @@ -0,0 +1,694 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// intrusive_sdlist is a special kind of intrusive list which we say is +// "singly-doubly" linked. Instead of having a typical intrusive list node +// which looks like this: +// +// struct intrusive_sdlist_node { +// intrusive_sdlist_node *mpNext; +// intrusive_sdlist_node *mpPrev; +// }; +// +// We instead have one that looks like this: +// +// struct intrusive_sdlist_node { +// intrusive_sdlist_node* mpNext; +// intrusive_sdlist_node** mppPrevNext; +// }; +// +// This may seem to be suboptimal, but it has one specific advantage: it allows +// the intrusive_sdlist class to be the size of only one pointer instead of two. +// This may seem like a minor optimization, but some users have wanted to create +// thousands of empty instances of these. +// This is because while an intrusive_list class looks like this: +// +// class intrusive_list { +// intrusive_list_node mBaseNode; +// }; +// +// an intrusive_sdlist class looks like this: +// +// class intrusive_sdlist { +// intrusive_sdlist_node* mpNext; +// }; +// +// So here we make a list of plusses and minuses of intrusive sdlists +// compared to intrusive_lists and intrusive_slists: +// +// | list | slist | sdlist +// --------------------------------------------------------- +// min size | 8 | 4 | 4 +// node size | 8 | 4 | 8 +// anonymous erase | yes | no | yes +// reverse iteration | yes | no | no +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_SDLIST_H +#define EASTL_INTRUSIVE_SDLIST_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + + /// intrusive_sdlist_node + /// + struct intrusive_sdlist_node + { + intrusive_sdlist_node* mpNext; + intrusive_sdlist_node** mppPrevNext; + }; + + + /// IntrusiveSDListIterator + /// + template + struct IntrusiveSDListIterator + { + typedef IntrusiveSDListIterator this_type; + typedef IntrusiveSDListIterator iterator; + typedef IntrusiveSDListIterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + pointer mpNode; + + public: + IntrusiveSDListIterator(); + explicit IntrusiveSDListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type. + IntrusiveSDListIterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + }; // struct IntrusiveSDListIterator + + + + + /// intrusive_sdlist_base + /// + /// Provides a template-less base class for intrusive_sdlist. + /// + class intrusive_sdlist_base + { + public: + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + + protected: + intrusive_sdlist_node* mpNext; + + public: + intrusive_sdlist_base(); + + bool empty() const; ///< Returns true if the container is empty. + size_type size() const; ///< Returns the number of elements in the list; O(n). + + void clear(); ///< Clears the list; O(1). No deallocation occurs. + void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated. + void reverse(); ///< Reverses a list so that front and back are swapped; O(n). + + //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching. + + }; // class intrusive_sdlist_base + + + + /// intrusive_sdlist + /// + template + class intrusive_sdlist : public intrusive_sdlist_base + { + public: + typedef intrusive_sdlist this_type; + typedef intrusive_sdlist_base base_type; + typedef T node_type; + typedef T value_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef IntrusiveSDListIterator iterator; + typedef IntrusiveSDListIterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + public: + intrusive_sdlist(); ///< Creates an empty list. + intrusive_sdlist(const this_type& x); ///< Creates an empty list; ignores the argument. + this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. + + iterator begin(); ///< Returns an iterator pointing to the first element in the list. + const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list. + const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list. + + iterator end(); ///< Returns an iterator pointing one-after the last element in the list. + const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list. + const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list. + + reference front(); ///< Returns a reference to the first element. The list must not be empty. + const_reference front() const; ///< Returns a const reference to the first element. The list must not be empty. + + void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. 
The element must not be in any other list.
+        void           push_back(value_type& value);             ///< Adds an element to the back of the list; O(N). The element is not copied. The element must not be in any other list.
+        void           pop_back();                                ///< Removes an element from the back of the list; O(N). The element must be present, but is not deallocated.
+
+        bool           contains(const value_type& value) const;  ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+        iterator       locate(value_type& value);                 ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+        const_iterator locate(const value_type& value) const;     ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+        iterator insert(iterator position, value_type& value);    ///< Inserts an element before the element pointed to by the iterator. O(1)
+        iterator erase(iterator position);                        ///< Erases the element pointed to by the iterator. O(1)
+        iterator erase(iterator first, iterator last);            ///< Erases elements within the iterator range [first, last). O(1).
+        void     swap(intrusive_sdlist& x);                       ///< Swaps the contents of two intrusive lists; O(1).
+
+        static void remove(value_type& value);                    ///< Erases an element from a list; O(1). Note that this is static, so you don't need to know which list the element is in, although it must be in some list.
+
+        void splice(iterator position, value_type& value);        ///< Moves the given element into this list before the element pointed to by position; O(1).
+                                                                   ///< Required: x must be in some list or have first/next pointers that point to itself.
+
+        void splice(iterator position, this_type& x);              ///< Moves the contents of a list into this list before the element pointed to by position; O(1).
+                                                                   ///< Required: &x != this (same as std::list).
+
+        void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the given element pointed to by xPosition within the list x into the current list before
+                                                                   ///< the element pointed to by position; O(1).
+
+        void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before
+                                                                   ///< the element pointed to by position; O(1).
+                                                                   ///< Required: position must not be in [first, last). (same as std::list).
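+        // A hedged usage sketch (MyNode is hypothetical, not part of this header):
+        //
+        //     struct MyNode : public eastl::intrusive_sdlist_node { int mX; };
+        //
+        //     MyNode a, b;
+        //     eastl::intrusive_sdlist<MyNode> list;
+        //     list.push_front(a);                          // O(1); a is linked in place, not copied
+        //     list.push_back(b);                           // O(N); walks to the tail
+        //     eastl::intrusive_sdlist<MyNode>::remove(a);  // static; no need to name the owning list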
+ bool validate() const; + int validate_iterator(const_iterator i) const; + + }; // intrusive_sdlist + + + + + /////////////////////////////////////////////////////////////////////// + // IntrusiveSDListIterator functions + /////////////////////////////////////////////////////////////////////// + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator() + { + #if EASTL_DEBUG + mpNode = NULL; + #endif + } + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator(pointer pNode) + : mpNode(pNode) + { + } + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator(const iterator& x) + : mpNode(x.mpNode) + { + } + + template + inline typename IntrusiveSDListIterator::reference + IntrusiveSDListIterator::operator*() const + { + return *mpNode; + } + + template + inline typename IntrusiveSDListIterator::pointer + IntrusiveSDListIterator::operator->() const + { + return mpNode; + } + + template + inline typename IntrusiveSDListIterator::this_type& + IntrusiveSDListIterator::operator++() + { + mpNode = static_cast(mpNode->mpNext); + return *this; + } + + template + inline typename IntrusiveSDListIterator::this_type + IntrusiveSDListIterator::operator++(int) + { + this_type temp = *this; + mpNode = static_cast(mpNode->mpNext); + return temp; + } + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode != b.mpNode; + } + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_sdlist_base + /////////////////////////////////////////////////////////////////////// + + inline intrusive_sdlist_base::intrusive_sdlist_base() + { mpNext = NULL; } + + + inline bool intrusive_sdlist_base::empty() const + { return mpNext == NULL; } + + + inline intrusive_sdlist_base::size_type intrusive_sdlist_base::size() const + { + size_type n = 0; + for(const intrusive_sdlist_node* pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext) + n++; + return n; + } + + + inline void intrusive_sdlist_base::clear() + { mpNext = NULL; } // Note that we don't do anything with the list nodes. + + + inline void intrusive_sdlist_base::pop_front() + { + // To consider: Set mpNext's pointers to NULL in debug builds. + mpNext = mpNext->mpNext; + mpNext->mppPrevNext = &mpNext; + } + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_sdlist + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_sdlist::intrusive_sdlist() + { + } + + + template + inline intrusive_sdlist::intrusive_sdlist(const this_type& /*x*/) + : intrusive_sdlist_base() + { + // We intentionally ignore argument x. 
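+        // (An intrusive container does not own its nodes, so there is nothing it
+        // could safely copy; copy construction therefore yields an empty list.)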
+ } + + + template + inline typename intrusive_sdlist::this_type& intrusive_sdlist::operator=(const this_type& /*x*/) + { + return *this; // We intentionally ignore argument x. + } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::begin() + { return iterator(static_cast(mpNext)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::begin() const + { return const_iterator(static_cast(const_cast(mpNext))); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::cbegin() const + { return const_iterator(static_cast(const_cast(mpNext))); } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::end() + { return iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::end() const + { return const_iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::cend() const + { return const_iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::reference intrusive_sdlist::front() + { return *static_cast(mpNext); } + + + template + inline typename intrusive_sdlist::const_reference intrusive_sdlist::front() const + { return *static_cast(mpNext); } + + + template + inline void intrusive_sdlist::push_front(value_type& value) + { + value.mpNext = mpNext; + value.mppPrevNext = &mpNext; + if(mpNext) + mpNext->mppPrevNext = &value.mpNext; + mpNext = &value; + } + + + template + inline void intrusive_sdlist::push_back(value_type& value) + { + intrusive_sdlist_node* pNext = mpNext; + intrusive_sdlist_node** ppPrevNext = &mpNext; + + while(pNext) + { + ppPrevNext = &pNext->mpNext; + pNext = pNext->mpNext; + } + + *ppPrevNext = &value; + value.mppPrevNext = ppPrevNext; + value.mpNext = NULL; + } + + + template + inline void intrusive_sdlist::pop_back() + { + node_type* pCurrent = static_cast(mpNext); + + while(pCurrent->mpNext) + pCurrent = static_cast(pCurrent->mpNext); + + *pCurrent->mppPrevNext = NULL; + } + + template + inline bool intrusive_sdlist::contains(const value_type& value) const + { + const intrusive_sdlist_node* pCurrent; + + for(pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return (pCurrent != NULL); + } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::locate(value_type& value) + { + intrusive_sdlist_node* pCurrent; + + for(pCurrent = static_cast(mpNext); pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return iterator(static_cast(pCurrent)); + } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::locate(const T& value) const + { + const intrusive_sdlist_node* pCurrent; + + for(pCurrent = static_cast(mpNext); pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return const_iterator(static_cast(const_cast(pCurrent))); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::insert(iterator position, value_type& value) + { + value.mppPrevNext = position.mpNode->mppPrevNext; + value.mpNext = position.mpNode; + *value.mppPrevNext = &value; + position.mpNode->mppPrevNext = &value.mpNext; + + return iterator(&value); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::erase(iterator position) + { + *position.mpNode->mppPrevNext = position.mpNode->mpNext; + position.mpNode->mpNext->mppPrevNext = 
position.mpNode->mppPrevNext; + + return iterator(position.mpNode); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::erase(iterator first, iterator last) + { + if(first.mpNode) // If not erasing the end... + { + *first.mpNode->mppPrevNext = last.mpNode; + + if(last.mpNode) // If not erasing to the end... + last.mpNode->mppPrevNext = first.mpNode->mppPrevNext; + } + + return last; + } + + + template + inline void intrusive_sdlist::remove(value_type& value) + { + *value.mppPrevNext = value.mpNext; + if(value.mpNext) + value.mpNext->mppPrevNext = value.mppPrevNext; + } + + + template + void intrusive_sdlist::swap(intrusive_sdlist& x) + { + // swap anchors + intrusive_sdlist_node* const temp(mpNext); + mpNext = x.mpNext; + x.mpNext = temp; + + if(x.mpNext) + x.mpNext->mppPrevNext = &mpNext; + + if(mpNext) + mpNext->mppPrevNext = &x.mpNext; + } + + + + + + // To do: Complete these splice functions. Might want to look at intrusive_sdlist for help. + + template + void intrusive_sdlist::splice(iterator /*position*/, value_type& /*value*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*xPosition*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*first*/, iterator /*last*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + inline bool intrusive_sdlist::validate() const + { + return true; // To do. + } + + + template + inline int intrusive_sdlist::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + // If we store an mSize member for intrusive_sdlist, we want to take advantage of it here. 
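+        // (Lacking a cached size, the lists are walked in lock-step below, stopping
+        // at the first mismatch or at either end; O(min(lengthA, lengthB)).)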
+ typename intrusive_sdlist::const_iterator ia = a.begin(); + typename intrusive_sdlist::const_iterator ib = b.begin(); + typename intrusive_sdlist::const_iterator enda = a.end(); + typename intrusive_sdlist::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + } + + template + bool operator<(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator!=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(a == b); + } + + template + bool operator>(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return b < a; + } + + template + bool operator<=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(b < a); + } + + template + bool operator>=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(a < b); + } + + template + void swap(intrusive_sdlist& a, intrusive_sdlist& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/bonus/intrusive_slist.h b/external/EASTL/include/EASTL/bonus/intrusive_slist.h new file mode 100644 index 00000000..28d445d9 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/intrusive_slist.h @@ -0,0 +1,321 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// *** Note *** +// This implementation is incomplete. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_SLIST_H +#define EASTL_INTRUSIVE_SLIST_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// intrusive_slist_node + /// + struct intrusive_slist_node + { + intrusive_slist_node* mpNext; + }; + + + /// IntrusiveSListIterator + /// + template + struct IntrusiveSListIterator + { + typedef IntrusiveSListIterator this_type; + typedef IntrusiveSListIterator iterator; + typedef IntrusiveSListIterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + IntrusiveSListIterator(); + explicit IntrusiveSListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type. + IntrusiveSListIterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + }; // struct IntrusiveSListIterator + + + + /// intrusive_slist_base + /// + /// Provides a template-less base class for intrusive_slist. + /// + class intrusive_slist_base + { + public: + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+		typedef ptrdiff_t    difference_type;
+
+	protected:
+		intrusive_slist_node* mpNext;
+
+	public:
+		intrusive_slist_base();
+
+		bool         empty() const;            ///< Returns true if the container is empty.
+		size_type    size() const;             ///< Returns the number of elements in the list; O(n).
+
+		void         clear();                  ///< Clears the list; O(1). No deallocation occurs.
+		void         pop_front();              ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+		void         reverse();                ///< Reverses a list so that front and back are swapped; O(n).
+
+		//bool       validate() const;         ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching.
+
+	}; // class intrusive_slist_base
+
+
+
+	/// intrusive_slist
+	///
+	template <typename T>
+	class intrusive_slist : public intrusive_slist_base
+	{
+	public:
+		typedef intrusive_slist<T>                             this_type;
+		typedef intrusive_slist_base                           base_type;
+		typedef T                                              node_type;
+		typedef T                                              value_type;
+		typedef typename base_type::size_type                  size_type;
+		typedef typename base_type::difference_type            difference_type;
+		typedef T&                                             reference;
+		typedef const T&                                       const_reference;
+		typedef T*                                             pointer;
+		typedef const T*                                       const_pointer;
+		typedef IntrusiveSListIterator<T, T*, T&>              iterator;
+		typedef IntrusiveSListIterator<T, const T*, const T&>  const_iterator;
+
+	public:
+		intrusive_slist();                          ///< Creates an empty list.
+		//intrusive_slist(const this_type& x);      ///< Creates an empty list; ignores the argument. To consider: Is this a useful function?
+		//this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. To consider: Is this a useful function?
+
+		iterator        begin();                 ///< Returns an iterator pointing to the first element in the list. O(1).
+		const_iterator  begin() const;           ///< Returns a const_iterator pointing to the first element in the list. O(1).
+		const_iterator  cbegin() const;          ///< Returns a const_iterator pointing to the first element in the list. O(1).
+		iterator        end();                   ///< Returns an iterator pointing one-after the last element in the list. O(1).
+		const_iterator  end() const;             ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+		const_iterator  cend() const;            ///< Returns a const_iterator pointing one-after the last element in the list. O(1).
+		iterator        before_begin();          ///< Returns an iterator to the position before begin. O(1).
+		const_iterator  before_begin() const;    ///< Returns a const_iterator to the position before begin. O(1).
+		const_iterator  cbefore_begin() const;   ///< Returns a const_iterator to the position before begin. O(1).
+
+		iterator        previous(const_iterator position);       ///< Returns an iterator to the previous position. O(n).
+		const_iterator  previous(const_iterator position) const; ///< Returns a const_iterator to the previous position. O(n).
+
+		reference       front();                 ///< Returns a reference to the first element. The list must not be empty.
+		const_reference front() const;           ///< Returns a const reference to the first element. The list must not be empty.
+
+		void push_front(value_type& value);      ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list.
+		void pop_front();                        ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated.
+
+		bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()).
+
+		iterator        locate(value_type& value);              ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n)
+		const_iterator  locate(const value_type& value) const;  ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n)
+
+		iterator insert(iterator position, value_type& value);        ///< Inserts an element before the element pointed to by the iterator. O(n)
+		iterator insert_after(iterator position, value_type& value);  ///< Inserts an element after the element pointed to by the iterator. O(1)
+
+		iterator erase(iterator position);        ///< Erases the element pointed to by the iterator. O(n)
+		iterator erase_after(iterator position);  ///< Erases the element after the element pointed to by the iterator. O(1)
+
+		iterator erase(iterator first, iterator last);               ///< Erases elements within the iterator range [first, last). O(n).
+		iterator erase_after(iterator before_first, iterator last);  ///< Erases elements within the iterator range [before_first, last). O(1).
+
+		void swap(this_type& x);   ///< Swaps the contents of two intrusive lists; O(1).
+
+
+		void splice(iterator position, value_type& value);  ///< Moves the given element into this list before the element pointed to by position; O(n).
+		                                                    ///< Required: value must be in some list or have first/next pointers that point to itself.
+
+		void splice(iterator position, this_type& x);       ///< Moves the contents of a list into this list before the element pointed to by position; O(n).
+		                                                    ///< Required: &x != this (same as std::list).
+
+		void splice(iterator position, this_type& x, iterator xPosition);  ///< Moves the element pointed to by xPosition within the list x into the current list before
+		                                                                   ///< the element pointed to by position; O(n).
+
+		void splice(iterator position, this_type& x, iterator first, iterator last);  ///< Moves the range of elements [first, last) from list x into the current list before
+		                                                                              ///< the element pointed to by position; O(n).
+		                                                                              ///< Required: position must not be in [first, last). (same as std::list).
+
+		void splice_after(iterator position, value_type& value);  ///< Moves the given element into this list after the element pointed to by position; O(1).
+		                                                          ///< Required: value must be in some list or have first/next pointers that point to itself.
+
+		void splice_after(iterator position, this_type& x);       ///< Moves the contents of a list into this list after the element pointed to by position; O(n).
+		                                                          ///< Required: &x != this (same as std::list).
+
+		void splice_after(iterator position, this_type& x, iterator xPrevious);  ///< Moves the element after xPrevious to be after position. O(1).
+		                                                                         ///< Required: &x != this (same as std::list).
+
+		void splice_after(iterator position, this_type& x, iterator before_first, iterator before_last);  ///< Moves the elements in the range of [before_first+1, before_last+1) to be after position. O(1).
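Since several of the operations above are declared but unimplemented in this bonus header, a small hypothetical sketch shows the intended calling pattern. MyMsg is an invented node type, and the sketch assumes the missing member definitions were completed:

    // Intrusive containers require the element itself to carry the link,
    // here by deriving from intrusive_slist_node.
    struct MyMsg : public eastl::intrusive_slist_node
    {
        int mId;
    };

    void Example()
    {
        eastl::intrusive_slist<MyMsg> list;
        MyMsg a, b;

        list.push_front(a);                  // O(1): links 'a' in place; nothing is copied or allocated.
        list.insert_after(list.begin(), b);  // O(1): cheap after a known position in a singly-linked list.

        // erase(pos) is O(n) because the predecessor must be found by walking;
        // erase_after(pos) is the O(1) primitive a singly-linked list supports natively.
        list.erase_after(list.begin());      // Unlinks 'b'; 'b' itself is not destroyed.
    }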
+ + bool validate() const; + int validate_iterator(const_iterator i) const; + + }; // intrusive_slist + + + + + /////////////////////////////////////////////////////////////////////// + // IntrusiveSListIterator + /////////////////////////////////////////////////////////////////////// + + template + inline IntrusiveSListIterator::IntrusiveSListIterator() + { + #if EASTL_DEBUG + mpNode = NULL; + #endif + } + + template + inline IntrusiveSListIterator::IntrusiveSListIterator(pointer pNode) + : mpNode(pNode) + { + } + + template + inline IntrusiveSListIterator::IntrusiveSListIterator(const iterator& x) + : mpNode(x.mpNode) + { + } + + + /////////////////////////////////////////////////////////////////////// + // intrusive_slist_base + /////////////////////////////////////////////////////////////////////// + + // To do. + + + /////////////////////////////////////////////////////////////////////// + // intrusive_slist + /////////////////////////////////////////////////////////////////////// + + // To do. + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const intrusive_slist& a, const intrusive_slist& b) + { + // If we store an mSize member for intrusive_slist, we want to take advantage of it here. + typename intrusive_slist::const_iterator ia = a.begin(); + typename intrusive_slist::const_iterator ib = b.begin(); + typename intrusive_slist::const_iterator enda = a.end(); + typename intrusive_slist::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + } + + template + bool operator<(const intrusive_slist& a, const intrusive_slist& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator!=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(a == b); + } + + template + bool operator>(const intrusive_slist& a, const intrusive_slist& b) + { + return b < a; + } + + template + bool operator<=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(b < a); + } + + template + bool operator>=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(a < b); + } + + template + void swap(intrusive_slist& a, intrusive_slist& b) + { + a.swap(b); + } + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/bonus/list_map.h b/external/EASTL/include/EASTL/bonus/list_map.h new file mode 100644 index 00000000..21d69fd8 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/list_map.h @@ -0,0 +1,966 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_LIST_MAP_H +#define EASTL_LIST_MAP_H + + +#include + +// 4512/4626 - 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere. +EA_DISABLE_VC_WARNING(4512 4626); + +namespace eastl +{ + + /// EASTL_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. 
+ /// + #ifndef EASTL_LIST_MAP_DEFAULT_NAME + #define EASTL_LIST_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list_map" // Unless the user overrides something, this is "EASTL list_map". + #endif + + /// EASTL_MAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_LIST_MAP_DEFAULT_ALLOCATOR + #define EASTL_LIST_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_MAP_DEFAULT_NAME) + #endif + + + /// list_map_data_base + /// + /// We define a list_map_data_base separately from list_map_data (below), because it + /// allows us to have non-templated operations, and it makes it so that the + /// list_map anchor node doesn't carry a T with it, which would waste space and + /// possibly lead to surprising the user due to extra Ts existing that the user + /// didn't explicitly create. The downside to all of this is that it makes debug + /// viewing of an list_map harder, given that the node pointers are of type + /// list_map_data_base and not list_map_data. + /// + struct list_map_data_base + { + list_map_data_base* mpNext; + list_map_data_base* mpPrev; + }; + + + /// list_map_data + /// + template + struct list_map_data : public list_map_data_base + { + typedef Value value_type; + + list_map_data(const value_type& value); + + value_type mValue; // This is a pair of key/value. + }; + + + /// list_map_iterator + /// + template + struct list_map_iterator + { + typedef list_map_iterator this_type; + typedef list_map_iterator iterator; + typedef list_map_iterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef list_map_data_base base_node_type; + typedef list_map_data node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + private: + base_node_type* mpNode; +#else + public: + node_type* mpNode; +#endif + + public: + list_map_iterator(); + list_map_iterator(const base_node_type* pNode); + + // This is the converting constructor of a non-const iterator to a const iterator + // This is never a copy constructor (due to enable_if) + template , bool> = true> + inline list_map_iterator(const iterator& x) + : mpNode(x.mpNode) + { + // Empty + } + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + private: + // This is a temp helper function for the deprecation. + // It should be removed when the deprecation window ends. +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + base_node_type* toInternalNodeType(base_node_type* node) { return node; } +#else + node_type* toInternalNodeType(base_node_type* node) { return static_cast(node); } +#endif + + template + friend bool operator==(const list_map_iterator&, const list_map_iterator&); + + template + friend bool operator!=(const list_map_iterator&, const list_map_iterator&); + + template + friend bool operator!=(const list_map_iterator&, const list_map_iterator&); + + // list_map uses mpNode + template + friend class list_map; + + // for the "copy" constructor, which uses non-const iterator even in the + // const_iterator case. + friend iterator; + friend const_iterator; + }; // list_map_iterator + + + /// use_value_first + /// + /// operator()(x) simply returns x.mValue.first. Used in list_map. 
+ /// This is similar to eastl::use_first, however it assumes that the input type is an object + /// whose mValue is an eastl::pair, and the first value in the pair is the desired return. + /// + template + struct use_value_first + { + typedef Object argument_type; + typedef typename Object::value_type::first_type result_type; + + const result_type& operator()(const Object& x) const + { return x.mValue.first; } + }; + + + /// list_map + /// + /// Implements a map like container, which also provides functionality similar to a list. + /// + /// Note: Like a map, keys must still be unique. As such, push_back() and push_front() operations + /// return a bool indicating success, or failure if the entry's key is already in use. + /// + /// list_map is designed to improve performance for situations commonly implemented as: + /// A map, which must be iterated over to find the oldest entry, or purge expired entries. + /// A list, which must be iterated over to remove a player's record when they sign off. + /// + /// list_map requires a little more memory per node than either a list or map alone, + /// and many of list_map's functions have a higher operational cost (CPU time) than their + /// counterparts in list and map. However, as the node count increases, list_map quickly outperforms + /// either a list or a map when find [by-index] and front/back type operations are required. + /// + /// In essence, list_map avoids O(n) iterations at the expense of additional costs to quick (O(1) and O(log n) operations: + /// push_front(), push_back(), pop_front() and pop_back() have O(log n) operation time, similar to map::insert(), rather than O(1) time like a list, + /// however, front() and back() maintain O(1) operation time. + /// + /// As a canonical example, consider a large backlog of player group invites, which are removed when either: + /// The invitation times out - in main loop: while( !listMap.empty() && listMap.front().IsExpired() ) { listMap.pop_front(); } + /// The player rejects the outstanding invitation - on rejection: iter = listMap.find(playerId); if (iter != listMap.end()) { listMap.erase(iter); } + /// + /// For a similar example, consider a high volume pending request container which must: + /// Time out old requests (similar to invites timing out above) + /// Remove requests once they've been handled (similar to rejecting invites above) + /// + /// For such usage patterns, the performance benefits of list_map become dramatic with + /// common O(n) operations once the node count rises to hundreds or more. + /// + /// When high performance is a priority, Containers with thousands of nodes or more + /// can quickly result in unacceptable performance when executing even infrequenty O(n) operations. + /// + /// In order to maintain strong performance, avoid iterating over list_map whenever possible. + /// + /////////////////////////////////////////////////////////////////////// + /// find_as + /// In order to support the ability to have a tree of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the tree's key type. See the find_as function + /// for more documentation on this. 
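A hedged sketch of what such a heterogeneous lookup could look like, assuming the rbtree find_as semantics described above. StrCmp is an illustrative comparator, not something this header provides:

    // Illustrative two-way comparator so a string-keyed list_map can be probed
    // with a raw const char*, without constructing a temporary eastl::string.
    struct StrCmp
    {
        bool operator()(const eastl::string& a, const char* b) const { return a.compare(b) < 0; }
        bool operator()(const char* a, const eastl::string& b) const { return b.compare(a) > 0; }
    };

    void Bump(eastl::list_map<eastl::string, int>& lm)
    {
        eastl::list_map<eastl::string, int>::iterator it = lm.find_as("player42", StrCmp());
        if (it != lm.end())
            ++it->second; // value half of the pair<const string, int>
    }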
+ /// + /////////////////////////////////////////////////////////////////////// + /// Pool allocation + /// If you want to make a custom memory pool for a list_map container, your pool + /// needs to contain items of type list_map::node_type. So if you have a memory + /// pool that has a constructor that takes the size of pool items and the + /// count of pool items, you would do this (assuming that MemoryPool implements + /// the Allocator interface): + /// typedef list_map, MemoryPool> WidgetMap; // Delare your WidgetMap type. + /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes. + /// WidgetMap myMap(&myPool); // Create a map that uses the pool. + /// + template , typename Allocator = EASTLAllocatorType> + class list_map + : protected rbtree >, Compare, Allocator, eastl::use_value_first > >, true, true> + { + public: + typedef rbtree >, Compare, Allocator, + eastl::use_value_first > >, true, true> base_type; + typedef list_map this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename eastl::pair value_type; // This is intentionally different from base_type::value_type + typedef value_type& reference; + typedef const value_type& const_reference; + typedef typename base_type::node_type node_type; // Despite the internal and external values being different, we're keeping the node type the same as the base + // in order to allow for pool allocation. See EASTL/map.h for more information. + typedef typename eastl::list_map_iterator iterator; // This is intentionally different from base_type::iterator + typedef typename eastl::list_map_iterator const_iterator; // This is intentionally different from base_type::const_iterator + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef typename base_type::allocator_type allocator_type; + typedef typename eastl::pair insert_return_type; // This is intentionally removed, as list_map doesn't support insert() functions, in favor of list like push_back and push_front + typedef typename eastl::use_first extract_key; // This is intentionally different from base_type::extract_key + + using base_type::get_allocator; + using base_type::set_allocator; + using base_type::key_comp; + using base_type::empty; + using base_type::size; + + protected: + typedef typename eastl::list_map_data > internal_value_type; + + protected: + // internal base node, acting as the sentinel for list like behaviors + list_map_data_base mNode; + + public: + list_map(const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR); + list_map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR); + + // To do: Implement the following: + + //list_map(const this_type& x); + //list_map(this_type&& x); + //list_map(this_type&& x, const allocator_type& allocator); + //list_map(std::initializer_list ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR); + + //template + //list_map(Iterator itBegin, Iterator itEnd); + + //this_type& operator=(const this_type& x); + //this_type& operator=(std::initializer_list ilist); + //this_type& operator=(this_type&& x); + + //void swap(this_type& x); + + public: + // iterators + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const 
EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + public: + // List like methods + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + // push_front and push_back which takes in a key/value pair + bool push_front(const value_type& value); + bool push_back(const value_type& value); + + // push_front and push_back which take key and value separately, for convenience + bool push_front(const key_type& key, const mapped_type& value); + bool push_back(const key_type& key, const mapped_type& value); + + void pop_front(); + void pop_back(); + + public: + // Map like methods + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + template + iterator find_as(const U& u, Compare2 compare2); + template + const_iterator find_as(const U& u, Compare2 compare2) const; + + size_type count(const key_type& key) const; + size_type erase(const key_type& key); + + public: + // Shared methods which are common to list and map + iterator erase(const_iterator position); + reverse_iterator erase(const_reverse_iterator position); + + void clear(); + void reset_lose_memory(); + + bool validate() const; + int validate_iterator(const_iterator i) const; + + public: + // list like functionality which is in consideration for implementation: + // iterator insert(const_iterator position, const value_type& value); + // void remove(const mapped_type& x); + + public: + // list like functionality which may be implemented, but is discouraged from implementation: + // due to the liklihood that they would require O(n) time to execute. + // template + // void remove_if(Predicate); + // void reverse(); + // void sort(); + // template + // void sort(Compare compare); + + public: + // map like functionality which list_map does not support, due to abmiguity with list like functionality: + #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS) + template + list_map(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR) = delete; + + insert_return_type insert(const value_type& value) = delete; + iterator insert(const_iterator position, const value_type& value) = delete; + + template + void insert(InputIterator first, InputIterator last) = delete; + + insert_return_type insert(const key_type& key) = delete; + + iterator erase(const_iterator first, const_iterator last) = delete; + reverse_iterator erase(reverse_iterator first, reverse_iterator last) = delete; + + void erase(const key_type* first, const key_type* last) = delete; + + iterator lower_bound(const key_type& key) = delete; + const_iterator lower_bound(const key_type& key) const = delete; + + iterator upper_bound(const key_type& key) = delete; + const_iterator upper_bound(const key_type& key) const = delete; + + eastl::pair equal_range(const key_type& key) = delete; + eastl::pair equal_range(const key_type& key) const = delete; + + mapped_type& operator[](const key_type& key) = delete; // Of map, multimap, set, and multimap, only map has operator[]. 
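Following the invite-timeout example in the class comment above, a hypothetical sketch of the intended access pattern; Invite and the time handling are invented for illustration:

    struct Invite { int64_t mExpiryTime; };

    void ExpireOld(eastl::list_map<uint32_t, Invite>& invites, int64_t now)
    {
        // The oldest entries sit at the front, so expiry never scans the container.
        while (!invites.empty() && (invites.front().second.mExpiryTime < now))
            invites.pop_front();                     // O(log n): also removes the tree entry.
    }

    void Reject(eastl::list_map<uint32_t, Invite>& invites, uint32_t playerId)
    {
        eastl::list_map<uint32_t, Invite>::iterator it = invites.find(playerId); // O(log n) keyed lookup
        if (it != invites.end())
            invites.erase(it);                       // Unlinks from both the list and the tree.
    }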
+ #endif + + public: + // list like functionality which list_map does not support, due to ambiguity with map like functionality: + #if 0 + reference push_front() = delete; + void* push_front_uninitialized() = delete; + + reference push_back() = delete; + void* push_back_uninitialized() = delete; + + iterator insert(const_iterator position) = delete; + + void insert(const_iterator position, size_type n, const value_type& value) = delete; + + template + void insert(const_iterator position, InputIterator first, InputIterator last) = delete; + + iterator erase(const_iterator first, const_iterator last) = delete; + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) = delete; + + void splice(const_iterator position, this_type& x) = delete + void splice(const_iterator position, this_type& x, const_iterator i) = delete; + void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last) = delete; + + void merge(this_type& x) = delete; + + template + void merge(this_type& x, Compare compare) = delete; + + void unique() = delete; // Uniqueness is enforced by map functionality + + template + void unique(BinaryPredicate) = delete; // Uniqueness is enforced by map functionality + #endif + + }; // list_map + + + /////////////////////////////////////////////////////////////////////// + // list_map_data + /////////////////////////////////////////////////////////////////////// + + template + inline list_map_data::list_map_data(const Value& value) + : mValue(value) + { + mpNext = NULL; // GCC 4.8 is generating warnings about referencing these values in list_map::push_front unless we + mpPrev = NULL; // initialize them here. The compiler seems to be mistaken, as our code isn't actually using them unintialized. + } + + + /////////////////////////////////////////////////////////////////////// + // list_map_iterator + /////////////////////////////////////////////////////////////////////// + + template + inline list_map_iterator::list_map_iterator() + : mpNode(NULL) + { + // Empty + } + + + template + inline list_map_iterator::list_map_iterator(const base_node_type* pNode) + : mpNode(toInternalNodeType(const_cast(pNode))) + { + // Empty + } + + + template + inline typename list_map_iterator::reference + list_map_iterator::operator*() const + { + return static_cast(mpNode)->mValue; + } + + + template + inline typename list_map_iterator::pointer + list_map_iterator::operator->() const + { + return &static_cast(mpNode)->mValue; + } + + + template + inline typename list_map_iterator::this_type& + list_map_iterator::operator++() + { + mpNode = toInternalNodeType(mpNode->mpNext); + return *this; + } + + + template + inline typename list_map_iterator::this_type + list_map_iterator::operator++(int) + { + this_type temp(*this); + mpNode = toInternalNodeType(mpNode->mpNext); + return temp; + } + + + template + inline typename list_map_iterator::this_type& + list_map_iterator::operator--() + { + mpNode = toInternalNodeType(mpNode->mpPrev); + return *this; + } + + + template + inline typename list_map_iterator::this_type + list_map_iterator::operator--(int) + { + this_type temp(*this); + mpNode = toInternalNodeType(mpNode->mpPrev); + return temp; + } + + + // We provide additional template paremeters here to support comparisons between const and non-const iterators. + // See C++ defect report #179, or EASTL/list.h for more information. 
+ template + inline bool operator==(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + /////////////////////////////////////////////////////////////////////// + // list_map + /////////////////////////////////////////////////////////////////////// + + template + inline list_map::list_map(const allocator_type& allocator) + : base_type(allocator) + { + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + inline list_map::list_map(const Compare& compare, const allocator_type& allocator) + : base_type(compare, allocator) + { + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + inline typename list_map::iterator + list_map::begin() EA_NOEXCEPT + { + return iterator(mNode.mpNext); + } + + template + inline typename list_map::const_iterator + list_map::begin() const EA_NOEXCEPT + { + return const_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_iterator + list_map::cbegin() const EA_NOEXCEPT + { + return const_iterator(mNode.mpNext); + } + + template + inline typename list_map::iterator + list_map::end() EA_NOEXCEPT + { + return iterator(&mNode); + } + + template + inline typename list_map::const_iterator + list_map::end() const EA_NOEXCEPT + { + return const_iterator(&mNode); + } + + template + inline typename list_map::const_iterator + list_map::cend() const EA_NOEXCEPT + { + return const_iterator(&mNode); + } + + template + inline typename list_map::reverse_iterator + list_map::rbegin() EA_NOEXCEPT + { + return reverse_iterator(&mNode); + } + + template + inline typename list_map::const_reverse_iterator + list_map::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mNode); + } + + template + inline typename list_map::const_reverse_iterator + list_map::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mNode); + } + + template + inline typename list_map::reverse_iterator + list_map::rend() EA_NOEXCEPT + { + return reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_reverse_iterator + list_map::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_reverse_iterator + list_map::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::reference + list_map::front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::front -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpNext)->mValue; + } + + template + inline typename list_map::const_reference + list_map::front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::front -- empty container"); + #else + // We allow the user to reference an empty container. 
+ #endif + + return static_cast(mNode.mpNext)->mValue; + } + + template + inline typename list_map::reference + list_map::back() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::back -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpPrev)->mValue; + } + + template + inline typename list_map::const_reference + list_map::back() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::back -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpPrev)->mValue; + } + + template + bool list_map::push_front(const value_type& value) + { + internal_value_type tempValue(value); + typename base_type::insert_return_type baseReturn = base_type::insert(tempValue); + + // Did the insert succeed? + if (baseReturn.second) + { + internal_value_type* pNode = &(*baseReturn.first); + + pNode->mpNext = mNode.mpNext; + pNode->mpPrev = &mNode; + + mNode.mpNext->mpPrev = pNode; + mNode.mpNext = pNode; + + return true; + } + else + { + return false; + } + } + + template + bool list_map::push_back(const value_type& value) + { + internal_value_type tempValue(value); + typename base_type::insert_return_type baseReturn = base_type::insert(tempValue); + + // Did the insert succeed? + if (baseReturn.second) + { + internal_value_type* pNode = &(*baseReturn.first); + + pNode->mpPrev = mNode.mpPrev; + pNode->mpNext = &mNode; + + mNode.mpPrev->mpNext = pNode; + mNode.mpPrev = pNode; + + return true; + } + else + { + return false; + } + } + + template + bool list_map::push_front(const key_type& key, const mapped_type& value) + { + return push_front(eastl::make_pair(key, value)); + } + + template + bool list_map::push_back(const key_type& key, const mapped_type& value) + { + return push_back(eastl::make_pair(key, value)); + } + + template + void list_map::pop_front() + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(empty())) + EASTL_FAIL_MSG("list_map::pop_front -- empty container"); + #endif + + erase(static_cast(mNode.mpNext)->mValue.first); + } + + template + void list_map::pop_back() + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(empty())) + EASTL_FAIL_MSG("list_map::pop_back -- empty container"); + #endif + + erase(static_cast(mNode.mpPrev)->mValue.first); + } + + template + inline typename list_map::iterator + list_map::find(const key_type& key) + { + typename base_type::iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + return iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + inline typename list_map::const_iterator + list_map::find(const key_type& key) const + { + typename base_type::const_iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + return const_iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + template + inline typename list_map::iterator + list_map::find_as(const U& u, Compare2 compare2) + { + typename base_type::iterator baseIter = base_type::find_as(u, compare2); + if (baseIter != base_type::end()) + { + return iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + template + inline typename list_map::const_iterator + list_map::find_as(const U& u, Compare2 compare2) const + { + typename 
base_type::const_iterator baseIter = base_type::find_as(u, compare2); + if (baseIter != base_type::end()) + { + return const_iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + inline typename list_map::size_type + list_map::count(const key_type& key) const + { + const typename base_type::const_iterator it = base_type::find(key); + return (it != base_type::end()) ? 1 : 0; + } + + template + inline typename list_map::size_type + list_map::erase(const key_type& key) + { + typename base_type::iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + internal_value_type* node = &(*baseIter); + + node->mpNext->mpPrev = node->mpPrev; + node->mpPrev->mpNext = node->mpNext; + + base_type::erase(baseIter); + + return 1; + } + return 0; + } + + template + inline typename list_map::iterator + list_map::erase(const_iterator position) + { + iterator posIter(position.mpNode); // Convert from const. + iterator eraseIter(posIter++); + erase(eraseIter->first); + return posIter; + } + + template + inline typename list_map::reverse_iterator + list_map::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + template + void list_map::clear() + { + base_type::clear(); + + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + void list_map::reset_lose_memory() + { + base_type::reset_lose_memory(); + + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + bool list_map::validate() const + { + if (!base_type::validate()) + { + return false; + } + + size_type nodeCount(0); + list_map_data_base* node = mNode.mpNext; + while (node != &mNode) + { + internal_value_type* data = static_cast(node); + if (base_type::find(data->mValue.first) == base_type::end()) + { + return false; + } + node = node->mpNext; + ++nodeCount; + } + if (nodeCount != size()) + { + return false; + } + nodeCount = 0; + node = mNode.mpPrev; + while (node != &mNode) + { + internal_value_type* data = static_cast(node); + if (base_type::find(data->mValue.first) == base_type::end()) + { + return false; + } + node = node->mpPrev; + ++nodeCount; + } + if (nodeCount != size()) + { + return false; + } + + return true; + } + + template + int list_map::validate_iterator(const_iterator iter) const + { + for (const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if (temp == iter) + { + return (isf_valid | isf_current | isf_can_dereference); + } + } + + if (iter == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + +} // namespace eastl + +EA_RESTORE_VC_WARNING(); + +#endif // Header include guard + + + + diff --git a/external/EASTL/include/EASTL/bonus/lru_cache.h b/external/EASTL/include/EASTL/bonus/lru_cache.h new file mode 100644 index 00000000..42719fd1 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/lru_cache.h @@ -0,0 +1,435 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// lru_cache is a container that simplifies caching of objects in a map. +// Basically, you give the container a key, like a string, and the data you want. +// The container provides callback mechanisms to generate data if it's missing +// as well as delete data when it's purged from the cache. 
This container +// uses a least recently used method: whatever the oldest item is will be +// replaced with a new entry. +// +// Algorithmically, the container is a combination of a map and a list. +// The list stores the age of the entries by moving the entry to the head +// of the list on each access, either by a call to get() or to touch(). +// The map is just the map as one would expect. +// +// This is useful for caching off data that is expensive to generate, +// for example text to speech wave files that are dynamically generated, +// but that will need to be reused, as is the case in narration of menu +// entries as a user scrolls through the entries. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_LRUCACHE_H +#define EASTL_LRUCACHE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once +#endif + +#include +#include +#include +#include // for pair +#include // for function, hash, equal_to + +namespace eastl +{ + /// EASTL_LRUCACHE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_LRUCACHE_DEFAULT_NAME + #define EASTL_LRUCACHE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " lru_cache" // Unless the user overrides something, this is "EASTL lru_cache". + #endif + + + /// EASTL_LRUCACHE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_LRUCACHE_DEFAULT_ALLOCATOR + #define EASTL_LRUCACHE_DEFAULT_ALLOCATOR allocator_type(EASTL_LRUCACHE_DEFAULT_NAME) + #endif + + /// lru_cache + /// + /// Implements a caching map based off of a key and data. + /// LRUList parameter is any container that guarantees the validity of its iterator even after a modification (e.g. list) + /// LRUMap is any associative container that can map a key to some data. By default, we use unordered_map, but it might be better + /// to use hash_map or some other structure depending on your key/data combination. For example, you may want to swap the + /// map backing if using strings as keys or if the data objects are small. In any case, unordered_map is a good default and should + /// work well enough since the purpose of this class is to cache results of expensive, order of milliseconds, operations + /// + /// Algorithmic Performance (default data structures): + /// touch() -> O(1) + /// insert() / update(), get() / operator[] -> equivalent to unordered_map (O(1) on average, O(n) worst) + /// size() -> O(1) + /// + /// All accesses to a given key (insert, update, get) will push that key to most recently used. + /// If the data objects are shared between threads, it would be best to use a smartptr to manage the lifetime of the data. + /// as it could be removed from the cache while in use by another thread. + template , + typename map_type = eastl::unordered_map, + eastl::hash, + eastl::equal_to, + Allocator>> + class lru_cache + { + public: + using key_type = Key; + using value_type = Value; + using allocator_type = Allocator; + using size_type = eastl_size_t; + using list_iterator = typename list_type::iterator; + using map_iterator = typename map_type::iterator; + using data_container_type = eastl::pair; + using iterator = typename map_type::iterator; + using const_iterator = typename map_type::const_iterator; + using this_type = lru_cache; + using create_callback_type = eastl::function; + using delete_callback_type = eastl::function; + + /// lru_cache constructor + /// + /// Creates a Key / Value map that only stores size Value objects until it deletes them. 
+ /// For complex objects or operations, the creator and deletor callbacks can be used. + /// This works just like a regular map object: on access, the Value will be created if it doesn't exist, returned otherwise. + explicit lru_cache(size_type size, + const allocator_type& allocator = EASTL_LRUCACHE_DEFAULT_ALLOCATOR, + create_callback_type creator = nullptr, + delete_callback_type deletor = nullptr) + : m_list(allocator) + , m_map(allocator) + , m_capacity(size) + , m_create_callback(creator) + , m_delete_callback(deletor) + { + } + + /// lru_cache destructor + /// + /// Iterates across every entry in the map and calls the deletor before calling the standard destructors + ~lru_cache() + { + // Destruct everything we have cached + for (auto& iter : m_map) + { + if (m_delete_callback) + m_delete_callback(iter.second.first); + } + } + + lru_cache(std::initializer_list> il) + : lru_cache(static_cast(il.size())) + { + for(auto& p : il) + insert_or_assign(p.first, p.second); + } + + // TODO(rparolin): Why do we prevent copies? And what about moves? + lru_cache(const this_type&) = delete; + this_type &operator=(const this_type&) = delete; + + /// insert + /// + /// insert key k with value v. + /// If key already exists, no change is made and the return value is false. + /// If the key doesn't exist, the data is added to the map and the return value is true. + bool insert(const key_type& k, const value_type& v) + { + if (m_map.find(k) == m_map.end()) + { + make_space(); + + m_list.push_front(k); + m_map[k] = data_container_type(v, m_list.begin()); + + return true; + } + else + { + return false; + } + } + + /// emplace + /// + /// Places a new object in place k created with args + /// If the key already exists, no change is made. + /// return value is a pair of the iterator to the emplaced or already-existing element and a bool denoting whether insertion took place. + template + eastl::pair emplace(const key_type& k, Args&&... args) + { + auto it = m_map.find(k); + if (it == m_map.end()) + { + make_space(); + + m_list.push_front(k); + return m_map.emplace(k, data_container_type(piecewise_construct, eastl::forward_as_tuple(eastl::forward(args)...), make_tuple(m_list.begin()))); + } + else + { + return make_pair(it, false); + } + } + + /// insert_or_assign + /// + /// Same as add, but replaces the data at key k, if it exists, with the new entry v + /// Note that the deletor for the old v will be called before it's replaced with the new value of v + void insert_or_assign(const key_type& k, const value_type& v) + { + auto iter = m_map.find(k); + + if (m_map.find(k) != m_map.end()) + { + assign(iter, v); + } + else + { + insert(k, v); + } + } + + /// contains + /// + /// Returns true if key k exists in the cache + bool contains(const key_type& k) const + { + return m_map.find(k) != m_map.end(); + } + + /// at + /// + /// Retrives the data for key k, not valid if k does not exist + eastl::optional at(const key_type& k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + return iter->second.first; + } + else + { + return eastl::nullopt; + } + } + + /// get + /// + /// Retrives the data for key k. If no data exists, it will be created by calling the + /// creator. + value_type& get(const key_type& k) + { + auto iter = m_map.find(k); + + // The entry exists in the cache + if (iter != m_map.end()) + { + touch(k); + return iter->second.first; + } + else // The entry doesn't exist in the cache, so create one + { + // Add the entry to the map + insert(k, m_create_callback ? 
m_create_callback(k) : value_type()); + + // return the new data + return m_map[k].first; + } + } + + /// Equivalent to get(k) + value_type& operator[](const key_type& k) { return get(k); } + + /// erase + /// + /// erases key k from the cache. + /// If k does not exist, returns false. If k exists, returns true. + bool erase(const key_type& k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + m_list.erase(iter->second.second); + + // Delete the actual entry + map_erase(iter); + + return true; + } + + return false; + } + + /// erase_oldest + /// + /// Removes the oldest entry from the cache. + void erase_oldest() + { + auto key = m_list.back(); + m_list.pop_back(); + + // Delete the actual entry + auto iter = m_map.find(key); + map_erase(iter); + } + + /// touch + /// + /// Touches key k, marking it as most recently used. + /// If k does not exist, returns false. If the touch was successful, returns true. + bool touch(const key_type& k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + touch(iter); + return true; + } + + return false; + } + + /// touch + /// + /// Touches key at iterator iter, moving it to most recently used position + void touch(iterator& iter) + { + auto listRef = iter->second.second; + + m_list.erase(listRef); + m_list.push_front(iter->first); + iter->second.second = m_list.begin(); + } + + /// assign + /// + /// Updates key k with data v. + /// If key k does not exist, returns false and no changes are made. + /// If key k exists, existing data has its deletor called and key k's data is replaced with new v data + bool assign(const key_type& k, const value_type& v) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + assign(iter, v); + return true; + } + + return false; + } + + /// assign + /// + /// Updates data at spot iter with data v. + void assign(iterator& iter, const value_type& v) + { + if (m_delete_callback) + m_delete_callback(iter->second.first); + touch(iter); + iter->second.first = v; + } + + // standard container functions + iterator begin() EA_NOEXCEPT { return m_map.begin(); } + iterator end() EA_NOEXCEPT { return m_map.end(); } + iterator rbegin() EA_NOEXCEPT { return m_map.rbegin(); } + iterator rend() EA_NOEXCEPT { return m_map.rend(); } + const_iterator begin() const EA_NOEXCEPT { return m_map.begin(); } + const_iterator cbegin() const EA_NOEXCEPT { return m_map.cbegin(); } + const_iterator crbegin() const EA_NOEXCEPT { return m_map.crbegin(); } + const_iterator end() const EA_NOEXCEPT { return m_map.end(); } + const_iterator cend() const EA_NOEXCEPT { return m_map.cend(); } + const_iterator crend() const EA_NOEXCEPT { return m_map.crend(); } + + bool empty() const EA_NOEXCEPT { return m_map.empty(); } + size_type size() const EA_NOEXCEPT { return m_map.size(); } + size_type capacity() const EA_NOEXCEPT { return m_capacity; } + + void clear() EA_NOEXCEPT + { + // Since we have a delete callback, we want to reuse the trim function by cheating the max + // size to clear all the entries to avoid duplicating code. + auto old_max = m_capacity; + + m_capacity = 0; + trim(); + m_capacity = old_max; + } + + /// resize + /// + /// Resizes the cache. Can be used to either expand or contract the cache. + /// In the case of a contraction, the oldest entries will be evicted with their respective + /// deletors called before completing. 
+ void resize(size_type newSize) + { + m_capacity = newSize; + trim(); + } + + void setCreateCallback(create_callback_type callback) { m_create_callback = callback; } + void setDeleteCallback(delete_callback_type callback) { m_delete_callback = callback; } + + // EASTL extensions + const allocator_type& get_allocator() const EA_NOEXCEPT { return m_map.get_allocator(); } + allocator_type& get_allocator() EA_NOEXCEPT { return m_map.get_allocator(); } + void set_allocator(const allocator_type& allocator) { m_map.set_allocator(allocator); m_list.set_allocator(allocator); } + + /// Does not reset the callbacks + void reset_lose_memory() EA_NOEXCEPT { m_map.reset_lose_memory(); m_list.reset_lose_memory(); } + + private: + inline void map_erase(map_iterator pos) + { + if (m_delete_callback) + m_delete_callback(pos->second.first); + m_map.erase(pos); + } + + bool trim() + { + if (size() <= m_capacity) + { + return false; // No trim necessary + } + + // We need to trim + do + { + erase_oldest(); + } while (m_list.size() > m_capacity); + + return true; + } + + void make_space() + { + if (size() == m_capacity) + { + erase_oldest(); + } + } + + private: + list_type m_list; + map_type m_map; + size_type m_capacity; + create_callback_type m_create_callback; + delete_callback_type m_delete_callback; + }; +} + + + +#endif diff --git a/external/EASTL/include/EASTL/bonus/overloaded.h b/external/EASTL/include/EASTL/bonus/overloaded.h new file mode 100644 index 00000000..32feab47 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/overloaded.h @@ -0,0 +1,86 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_OVERLOADED_H +#define EASTL_OVERLOADED_H + +#include +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed + // improvements in apps as a result. +#endif + +// 4512/4626 - 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere. +EA_DISABLE_VC_WARNING(4512 4626); + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////// + /// overloaded + /// + /// A helper class that permits you to combine multiple function objects into one. + /// Typically, this helper is really handy when visiting an eastl::variant with multiple lambdas. + /// Example: + /// + /// eastl::variant v{42}; + /// + /// eastl::visit( + /// eastl::overloaded{ + /// [](const int& x) { std::cout << "Visited an integer: " << x << "\n"; }, // Will reach that lambda with x == 42. + /// [](const string& s) { std::cout << "Visited an string: " << s << "\n"; } + /// }, + /// v + /// ); + /////////////////////////////////////////////////////////////////////////// + template + struct overloaded; + + template + struct overloaded : T + { + template + EA_CPP14_CONSTEXPR overloaded(U&& u) : T(eastl::forward(u)) + { + } + + using T::operator(); + }; + + template + struct overloaded : T, overloaded + { + template + EA_CPP14_CONSTEXPR overloaded(U&& u, V&&... v) : T(eastl::forward(u)), overloaded(eastl::forward(v)...) + { + } + + using T::operator(); + using overloaded::operator(); + }; + + #ifdef __cpp_deduction_guides + template + overloaded(T...) 
-> overloaded; + #endif + + /////////////////////////////////////////////////////////////////////////// + /// make_overloaded + /// + /// Helper function to create an overloaded instance when lacking deduction guides. + /// make_overloaded(f1, f2, f3) == overloaded{f1, f2, f3} + /////////////////////////////////////////////////////////////////////////// + template + EA_CPP14_CONSTEXPR overloaded::type...> make_overloaded(T&&... t) + { + return overloaded::type...>{eastl::forward(t)...}; + } + +} // namespace eastl + +EA_RESTORE_VC_WARNING(); + +#endif // EASTL_OVERLOADED_H diff --git a/external/EASTL/include/EASTL/bonus/ring_buffer.h b/external/EASTL/include/EASTL/bonus/ring_buffer.h new file mode 100644 index 00000000..99c23463 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/ring_buffer.h @@ -0,0 +1,1576 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// A ring buffer is a FIFO (first-in, first-out) container which acts +// much like a queue. The difference is that a ring buffer is implemented +// via chasing pointers around a given container instead of like queue +// adds to the writes to the end of the container are reads from the begin. +// The benefit of a ring buffer is that memory allocations don't occur +// and new elements are neither added nor removed from the container. +// Elements in the container are simply assigned values in circles around +// the container. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_RING_BUFFER_H +#define EASTL_RING_BUFFER_H + + +#include +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_RING_BUFFER_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_RING_BUFFER_DEFAULT_NAME + #define EASTL_RING_BUFFER_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " ring_buffer" // Unless the user overrides something, this is "EASTL ring_buffer". + #endif + + /// EASTL_RING_BUFFER_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_RING_BUFFER_DEFAULT_ALLOCATOR + #define EASTL_RING_BUFFER_DEFAULT_ALLOCATOR allocator_type(EASTL_RING_BUFFER_DEFAULT_NAME) + #endif + + + /// ring_buffer_iterator + /// + /// We force this iterator to act like a random access iterator even if + /// the underlying container doesn't support random access iteration. + /// Any BidirectionalIterator can be a RandomAccessIterator; it just + /// might be inefficient in some cases. 
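The claim above, that a bidirectional iterator can be made to act like a random access iterator at O(n) cost, comes down to implementing the jump as a loop. A generic sketch of the idea, not this header's actual code:

    // Advance a bidirectional iterator by n steps, one ++/-- at a time.
    // This is the O(n) fallback a ring buffer over list-like containers relies on.
    template <typename BidirectionalIterator, typename Difference>
    void AdvanceBidirectional(BidirectionalIterator& it, Difference n)
    {
        for (; n > 0; --n) ++it; // forward steps
        for (; n < 0; ++n) --it; // backward steps
    }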
+ /// + template + struct ring_buffer_iterator + { + public: + typedef ring_buffer_iterator this_type; + typedef T value_type; + typedef Pointer pointer; + typedef Reference reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator container_iterator; + typedef typename Container::const_iterator container_const_iterator; + typedef ring_buffer_iterator iterator; + typedef ring_buffer_iterator const_iterator; + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + + public: + Container* mpContainer; + container_iterator mContainerIterator; + + public: + ring_buffer_iterator(); + ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator); + ring_buffer_iterator(const iterator& x); + + ring_buffer_iterator& operator=(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + this_type& operator+=(difference_type n); + this_type& operator-=(difference_type n); + + this_type operator+(difference_type n) const; + this_type operator-(difference_type n) const; + + protected: + void increment(difference_type n, EASTL_ITC_NS::input_iterator_tag); + void increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag); + + }; // struct ring_buffer_iterator + + + + /// ring_buffer + /// + /// Implements a ring buffer via a given container type, which would + /// typically be a vector or array, though any container which supports + /// bidirectional iteration would work. + /// + /// A ring buffer is a FIFO (first-in, first-out) container which acts + /// much like a queue. The difference is that a ring buffer is implemented + /// via chasing pointers around a container and moving the read and write + /// positions forward (and possibly wrapping around) as the container is + /// read and written via pop_front and push_back. + /// + /// The benefit of a ring buffer is that memory allocations don't occur + /// and new elements are neither added nor removed from the container. + /// Elements in the container are simply assigned values in circles around + /// the container. + /// + /// ring_buffer is different from other containers -- including adapter + /// containers -- in how iteration is done. Iteration of a ring buffer + /// starts at the current begin position, proceeds to the end of the underlying + /// container, and continues at the begin of the underlying container until + /// the ring buffer's current end position. Thus a ring_buffer does + /// indeed have a begin and an end, though the values of begin and end + /// chase each other around the container. An empty ring_buffer is one + /// in which end == begin, and a full ring_buffer is one in which + /// end + 1 == begin. + /// + /// Example of a ring buffer layout, where + indicates queued items: + /// ++++++++++--------------------------------+++++++++ + /// ^ ^ + /// end begin + /// + /// Empty ring buffer: + /// --------------------------------------------------- + /// ^ + /// begin / end + /// + /// Full ring buffer. Note that one item is necessarily unused; it is + /// analagous to a '\0' at the end of a C string: + /// +++++++++++++++++++++++++++++++++++++++++-+++++++++ + /// ^^ + /// end begin + /// + /// A push_back operation on a ring buffer assigns the new value to end. 
+	/// If there is no more space in the buffer, this will result in begin
+	/// being overwritten and the begin position being moved forward one position.
+	/// The user can use the full() function to detect this condition.
+	/// Note that elements in a ring buffer are not created or destroyed as
+	/// they are added and removed; they are merely assigned. Only on
+	/// container construction and destruction are any elements created and
+	/// destroyed.
+	///
+	/// The ring buffer can be used in either direction. By this we mean that
+	/// you can use push_back to add items and pop_front to remove them; or you can
+	/// use push_front to add items and pop_back to remove them. You aren't
+	/// limited to these operations; you can push or pop from either side
+	/// arbitrarily and you can insert or erase anywhere in the container.
+	///
+	/// The ring buffer requires the user to specify a Container type, which
+	/// by default is vector. However, any container with bidirectional iterators
+	/// will work, such as list, deque, string or any of the fixed_* versions
+	/// of these containers, such as fixed_string. Since ring buffer works via copying
+	/// elements instead of allocating and freeing nodes, inserting in the middle
+	/// of a ring buffer based on list (instead of vector) is no more efficient.
+	///
+	/// To use the ring buffer, its container must be resized to the desired
+	/// ring buffer size. Changing the size of a ring buffer may invalidate
+	/// ring buffer iterators.
+	///
+	/// An alternative to using a ring buffer is to use a list with a user-created
+	/// node pool and custom allocator. There are various tradeoffs that result from this.
+	///
+	/// Example usage:
+	///     ring_buffer< int, list<int> > rb(100);
+	///     rb.push_back(1);
+	///
+	/// Example usage:
+	///     // Example of creating an on-screen debug log that shows 16
+	///     // strings at a time and scrolls older strings away.
+	///
+	///     // Create ring buffer of 16 strings.
+	///     ring_buffer< string, vector<string> > debugLogText(16);
+	///
+	///     // Reserve 128 chars for each line. This can make it so that no
+	///     // runtime memory allocations occur.
+	///     for(vector<string>::iterator it = debugLogText.get_container().begin(),
+	///         itEnd = debugLogText.get_container().end(); it != itEnd; ++it)
+	///     {
+	///         (*it).reserve(128);
+	///     }
+	///
+	///     // Add a new string, using push_front() and front() instead of
+	///     // push_front(str) in order to avoid creating a temporary str.
+ /// debugLogText.push_front(); + /// debugLogText.front() = "Player fired weapon"; + /// + template , typename Allocator = typename Container::allocator_type> + class ring_buffer + { + public: + typedef ring_buffer this_type; + typedef Container container_type; + typedef Allocator allocator_type; + + typedef typename Container::value_type value_type; + typedef typename Container::reference reference; + typedef typename Container::const_reference const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator container_iterator; + typedef typename Container::const_iterator container_const_iterator; + typedef ring_buffer_iterator iterator; + typedef ring_buffer_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness. + Container c; // We follow the naming convention established for stack, queue, priority_queue and name this 'c'. This variable must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element. + + protected: + container_iterator mBegin; // We keep track of where our begin and end are by using Container iterators. + container_iterator mEnd; + size_type mSize; + + public: + // There currently isn't a ring_buffer constructor that specifies an initial size, unlike other containers. + explicit ring_buffer(size_type cap = 0); // Construct with an initial capacity (but size of 0). + explicit ring_buffer(size_type cap, const allocator_type& allocator); + explicit ring_buffer(const Container& x); + explicit ring_buffer(const allocator_type& allocator); + ring_buffer(const this_type& x); + ring_buffer(this_type&& x); + ring_buffer(this_type&& x, const allocator_type& allocator); + ring_buffer(std::initializer_list ilist, const allocator_type& allocator = EASTL_RING_BUFFER_DEFAULT_ALLOCATOR); // This function sets the capacity to be equal to the size of the initializer list. + + // No destructor necessary. Default will do. + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + template + void assign(InputIterator first, InputIterator last); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + bool full() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + size_type capacity() const EA_NOEXCEPT; + + void resize(size_type n); + void set_capacity(size_type n); // Sets the capacity to the given value, including values less than the current capacity. Adjusts the size downward if n < size, by throwing out the oldest elements in the buffer. 
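+		// A small worked example of the truncation rule (hypothetical values):
+		// a buffer holding {1,2,3,4} after set_capacity(2) keeps only the two
+		// newest elements, {3,4}; the oldest elements are thrown out first.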
+ void reserve(size_type n); // Reserve a given capacity. Doesn't decrease the capacity; it only increases it (for compatibility with other containers' behavior). + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + void push_back(const value_type& value); + reference push_back(); + + void push_front(const value_type& value); + reference push_front(); + + void pop_back(); + void pop_front(); + + reference operator[](size_type n); + const_reference operator[](size_type n) const; + + // To consider: + // size_type read(value_type* pDestination, size_type nCount); + // size_type read(iterator** pPosition1, iterator** pPosition2, size_type& nCount1, size_type& nCount2); + + /* To do: + template + reference emplace_front(Args&&... args); + + template + reference emplace_back(Args&&... args); + + template + iterator emplace(const_iterator position, Args&&... args); + */ + + iterator insert(const_iterator position, const value_type& value); + void insert(const_iterator position, size_type n, const value_type& value); + void insert(const_iterator position, std::initializer_list ilist); + + template + void insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + void clear(); + + container_type& get_container(); + const container_type& get_container() const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + //size_type DoGetSize(EASTL_ITC_NS::input_iterator_tag) const; + //size_type DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const; + + }; // class ring_buffer + + + + + /////////////////////////////////////////////////////////////////////// + // ring_buffer_iterator + /////////////////////////////////////////////////////////////////////// + + template + ring_buffer_iterator::ring_buffer_iterator() + : mpContainer(NULL), mContainerIterator() + { + } + + + template + ring_buffer_iterator::ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator) + : mpContainer(pContainer), mContainerIterator(containerIterator) + { + } + + + template + ring_buffer_iterator::ring_buffer_iterator(const iterator& x) + : mpContainer(x.mpContainer), mContainerIterator(x.mContainerIterator) + { + } + + + template + ring_buffer_iterator& + ring_buffer_iterator::operator=(const iterator& x) + { + mpContainer = x.mpContainer; + mContainerIterator = x.mContainerIterator; + return *this; + } + + template + typename ring_buffer_iterator::reference + ring_buffer_iterator::operator*() const + { + return *mContainerIterator; + } + + + template + typename ring_buffer_iterator::pointer + ring_buffer_iterator::operator->() const + { + return &*mContainerIterator; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator++() + { + if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end())) + mContainerIterator = mpContainer->begin(); + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator++(int) + { + const this_type temp(*this); + if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end())) + mContainerIterator = mpContainer->begin(); + return temp; + } + + + template + typename ring_buffer_iterator::this_type& + 
ring_buffer_iterator::operator--() + { + if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin())) + mContainerIterator = mpContainer->end(); + --mContainerIterator; + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator--(int) + { + const this_type temp(*this); + if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin())) + mContainerIterator = mpContainer->end(); + --mContainerIterator; + return temp; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator+=(difference_type n) + { + typedef typename eastl::iterator_traits::iterator_category IC; + increment(n, IC()); + return *this; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator-=(difference_type n) + { + typedef typename eastl::iterator_traits::iterator_category IC; + increment(-n, IC()); + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator+(difference_type n) const + { + return this_type(*this).operator+=(n); + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator-(difference_type n) const + { + return this_type(*this).operator+=(-n); + } + + + template + void ring_buffer_iterator::increment(difference_type n, EASTL_ITC_NS::input_iterator_tag) + { + // n cannot be negative, as input iterators don't support reverse iteration. + while(n-- > 0) + operator++(); + } + + + template + void ring_buffer_iterator::increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag) + { + // We make the assumption here that the user is incrementing from a valid + // starting position to a valid ending position. Thus *this + n yields a + // valid iterator, including if n happens to be a negative value. + + if(n >= 0) + { + const difference_type d = mpContainer->end() - mContainerIterator; + + if(n < d) + mContainerIterator += n; + else + mContainerIterator = mpContainer->begin() + (n - d); + } + else + { + // Recall that n and d here will be negative and so the logic here works as intended. + const difference_type d = mpContainer->begin() - mContainerIterator; + + if(n >= d) + mContainerIterator += n; + else + mContainerIterator = mpContainer->end() + (n - d); + } + } + + + // Random access iterators must support operator + and operator -. + // You can only add an integer to an iterator, and you cannot add two iterators. + template + inline ring_buffer_iterator + operator+(ptrdiff_t n, const ring_buffer_iterator& x) + { + return x + n; // Implement (n + x) in terms of (x + n). + } + + + // You can only add an integer to an iterator, but you can subtract two iterators. + template + inline typename ring_buffer_iterator::difference_type + operator-(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + typedef typename ring_buffer_iterator::difference_type difference_type; + + // To do: If container_iterator is a random access iterator, then do a simple calculation. + // Otherwise, we have little choice but to iterate from a to b and count as we go. + // See the ring_buffer::size function for an implementation of this. + + // Iteration implementation: + difference_type d = 0; + + for(ring_buffer_iterator temp(b); temp != a; ++temp) + ++d; + + return d; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. 
The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + // Perhaps we should compare the container pointer as well. + // However, for valid iterators this shouldn't be necessary. + return a.mContainerIterator == b.mContainerIterator; + } + + + template + inline bool operator!=(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + // Perhaps we should compare the container pointer as well. + // However, for valid iterators this shouldn't be necessary. + return !(a.mContainerIterator == b.mContainerIterator); + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + return !(a.mContainerIterator == b.mContainerIterator); + } + + + + + /////////////////////////////////////////////////////////////////////// + // ring_buffer + /////////////////////////////////////////////////////////////////////// + + template + ring_buffer::ring_buffer(size_type cap) + : c() // Default construction with default allocator for the container. + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(size_type cap, const allocator_type& allocator) + : c(allocator) + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const Container& x) + : c(x) // This copies elements from x, but unless the user is doing some tricks, the only thing that matters is that c.size() == x.size(). + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + if(c.empty()) + c.resize(1); + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const allocator_type& allocator) + : c(allocator) + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const this_type& x) + : c(x.c) + { + mBegin = c.begin(); + mEnd = mBegin; + mSize = x.mSize; + + eastl::advance(mBegin, eastl::distance(const_cast(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound. 
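+		// mEnd is recomputed the same way: measure x.mEnd's offset within x.c,
+		// then replay that offset against our own freshly copied container.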
+ eastl::advance(mEnd, eastl::distance(const_cast(x).c.begin(), x.mEnd)); + } + + template + ring_buffer::ring_buffer(this_type&& x) + : c() // Default construction with default allocator for the container. + { + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible. + } + + template + ring_buffer::ring_buffer(this_type&& x, const allocator_type& allocator) + : c(allocator) + { + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + if(c.get_allocator() == x.c.get_allocator()) + swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible. + else + operator=(x); + } + + + template + ring_buffer::ring_buffer(std::initializer_list ilist, const allocator_type& allocator) + : c(allocator) + { + c.resize((eastl_size_t)ilist.size() + 1); + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + assign(ilist.begin(), ilist.end()); + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(const this_type& x) + { + if(&x != this) + { + c = x.c; + + mBegin = c.begin(); + mEnd = mBegin; + mSize = x.mSize; + + eastl::advance(mBegin, eastl::distance(const_cast(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound. + eastl::advance(mEnd, eastl::distance(const_cast(x).c.begin(), x.mEnd)); + } + + return *this; + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(this_type&& x) + { + swap(x); + return *this; + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(std::initializer_list ilist) + { + assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + template + void ring_buffer::assign(InputIterator first, InputIterator last) + { + // To consider: We can make specializations of this for pointer-based + // iterators to PODs and turn the action into a memcpy. + clear(); + + for(; first != last; ++first) + push_back(*first); + } + + + template + void ring_buffer::swap(this_type& x) + { + if(&x != this) + { + const difference_type dBegin = eastl::distance(c.begin(), mBegin); // We can do a simple distance algorithm here, as there will be no wraparound. + const difference_type dEnd = eastl::distance(c.begin(), mEnd); + + const difference_type dxBegin = eastl::distance(x.c.begin(), x.mBegin); + const difference_type dxEnd = eastl::distance(x.c.begin(), x.mEnd); + + eastl::swap(c, x.c); + eastl::swap(mSize, x.mSize); + + mBegin = c.begin(); + eastl::advance(mBegin, dxBegin); // We can do a simple advance algorithm here, as there will be no wraparound. + + mEnd = c.begin(); + eastl::advance(mEnd, dxEnd); + + x.mBegin = x.c.begin(); + eastl::advance(x.mBegin, dBegin); + + x.mEnd = x.c.begin(); + eastl::advance(x.mEnd, dEnd); + } + } + + + template + typename ring_buffer::iterator + ring_buffer::begin() EA_NOEXCEPT + { + return iterator(&c, mBegin); + } + + + template + typename ring_buffer::const_iterator + ring_buffer::begin() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mBegin); // We trust that the const_iterator will respect const-ness. 
+ } + + + template + typename ring_buffer::const_iterator + ring_buffer::cbegin() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mBegin); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::iterator + ring_buffer::end() EA_NOEXCEPT + { + return iterator(&c, mEnd); + } + + + template + typename ring_buffer::const_iterator + ring_buffer::end() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mEnd); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::const_iterator + ring_buffer::cend() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mEnd); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::rbegin() EA_NOEXCEPT + { + return reverse_iterator(iterator(&c, mEnd)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mEnd)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mEnd)); + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::rend() EA_NOEXCEPT + { + return reverse_iterator(iterator(&c, mBegin)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mBegin)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mBegin)); + } + + + template + bool ring_buffer::empty() const EA_NOEXCEPT + { + return mBegin == mEnd; + } + + + template + bool ring_buffer::full() const EA_NOEXCEPT + { + // Implementation that relies on c.size() being a fast operation: + // return mSize == (c.size() - 1); // (c.size() - 1) == capacity(); we are attempting to reduce function calls. + + // Version that has constant speed guarantees, but is still pretty fast. + const_iterator afterEnd(end()); + ++afterEnd; + return afterEnd.mContainerIterator == mBegin; + } + + + template + typename ring_buffer::size_type + ring_buffer::size() const EA_NOEXCEPT + { + return mSize; + + // Alternatives: + // return eastl::distance(begin(), end()); + // return end() - begin(); // This is more direct than using distance(). + //typedef typename eastl::iterator_traits::iterator_category IC; + //return DoGetSize(IC()); // This is more direct than using iterator math. + } + + + /* + template + typename ring_buffer::size_type + ring_buffer::DoGetSize(EASTL_ITC_NS::input_iterator_tag) const + { + // We could alternatively just use eastl::distance() here, but we happen to + // know that such code would boil down to what we have here, and we might + // as well remove function calls where possible. 
+ difference_type d = 0; + + for(const_iterator temp(begin()), tempEnd(end()); temp != tempEnd; ++temp) + ++d; + + return (size_type)d; + } + */ + + /* + template + typename ring_buffer::size_type + ring_buffer::DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const + { + // A simpler but less efficient implementation fo this function would be: + // return eastl::distance(mBegin, mEnd); + // + // The calculation of distance here takes advantage of the fact that random + // access iterators' distances can be calculated by simple pointer calculation. + // Thus the code below boils down to a few subtractions when using a vector, + // string, or array as the Container type. + // + const difference_type dBegin = eastl::distance(const_cast(c).begin(), mBegin); // const_cast here solves a little compiler + const difference_type dEnd = eastl::distance(const_cast(c).begin(), mEnd); // argument matching problem. + + if(dEnd >= dBegin) + return dEnd - dBegin; + + return c.size() - (dBegin - dEnd); + } + */ + + + namespace Internal + { + /////////////////////////////////////////////////////////////// + // has_overflow_allocator + // + // returns true_type when the specified container type is an + // eastl::fixed_* container and therefore has an overflow + // allocator type. + // + template + struct has_overflow_allocator : false_type {}; + + template + struct has_overflow_allocator().get_overflow_allocator())>> : true_type {}; + + + /////////////////////////////////////////////////////////////// + // GetFixedContainerCtorAllocator + // + // eastl::fixed_* containers are only constructible via their + // overflow allocator type. This helper select the appropriate + // allocator from the specified container. + // + template ()()> + struct GetFixedContainerCtorAllocator + { + auto& operator()(Container& c) { return c.get_overflow_allocator(); } + }; + + template + struct GetFixedContainerCtorAllocator + { + auto& operator()(Container& c) { return c.get_allocator(); } + }; + } // namespace Internal + + + /////////////////////////////////////////////////////////////// + // ContainerTemporary + // + // Helper type which prevents utilizing excessive stack space + // when creating temporaries when swapping/copying the underlying + // ring_buffer container type. + // + template = EASTL_MAX_STACK_USAGE)> + struct ContainerTemporary + { + Container mContainer; + + ContainerTemporary(Container& parentContainer) + : mContainer(Internal::GetFixedContainerCtorAllocator{}(parentContainer)) + { + } + + Container& get() { return mContainer; } + }; + + template + struct ContainerTemporary + { + typename Container::allocator_type* mAllocator; + Container* mContainer; + + ContainerTemporary(Container& parentContainer) + : mAllocator(&parentContainer.get_allocator()) + , mContainer(new (mAllocator->allocate(sizeof(Container))) Container) + { + } + + ~ContainerTemporary() + { + mContainer->~Container(); + mAllocator->deallocate(mContainer, sizeof(Container)); + } + + Container& get() { return *mContainer; } + }; + + + template + void ring_buffer::resize(size_type n) + { + // Note that if n > size(), we just move the end position out to + // the begin + n, with the data being the old end and the new end + // being stale values from the past. This is by design, as the concept + // of arbitrarily resizing a ring buffer like this is currently deemed + // to be vague in what it intends to do. We can only assume that the + // user knows what he is doing and will deal with the stale values. 
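+		// A small worked example (hypothetical values): a buffer with capacity 4
+		// holding {1,2} that is resized to 4 reports size() == 4, with the two
+		// trailing elements holding whatever stale values the container had.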
+ EASTL_ASSERT(c.size() >= 1); + const size_type cap = (c.size() - 1); + + mSize = n; + + if(n > cap) // If we need to grow in capacity... + { + // Given that a growing operation will always result in memory allocation, + // we currently implement this function via the usage of a temp container. + // This makes for a simple implementation, but in some cases it is less + // efficient. In particular, if the container is a node-based container like + // a (linked) list, this function would be faster if we simply added nodes + // to ourself. We would do this by inserting the nodes to be after end() + // and adjusting the begin() position if it was after end(). + + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + eastl::copy(begin(), end(), cTemp.get().begin()); + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, n); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. + } + else // We could do a check here for n != size(), but that would be costly and people don't usually resize things to their same size. + { + mEnd = mBegin; + + // eastl::advance(mEnd, n); // We *cannot* use this because there may be wraparound involved. + + // To consider: Possibly we should implement some more detailed logic to optimize the code here. + // We'd need to do different behaviour dending on whether the container iterator type is a + // random access iterator or otherwise. + + while(n--) + { + if(EASTL_UNLIKELY(++mEnd == c.end())) + mEnd = c.begin(); + } + } + } + + + template + typename ring_buffer::size_type + ring_buffer::capacity() const EA_NOEXCEPT + { + EASTL_ASSERT(c.size() >= 1); // This is required because even an empty ring_buffer has one unused termination element, somewhat like a \0 at the end of a C string. + + return (c.size() - 1); // Need to subtract one because the position at mEnd is unused. + } + + + template + void ring_buffer::set_capacity(size_type n) + { + const size_type capacity = (c.size() - 1); + + if(n != capacity) // If we need to change capacity... + { + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + + iterator itCopyBegin = begin(); + + if(n < mSize) // If we are shrinking the capacity, to less than our size... + { + eastl::advance(itCopyBegin, mSize - n); + mSize = n; + } + + eastl::copy(itCopyBegin, end(), cTemp.get().begin()); // The begin-end range may in fact be larger than n, in which case values will be overwritten. + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. + } + } + + + template + void ring_buffer::reserve(size_type n) + { + // We follow the pattern of vector and only do something if n > capacity. + EASTL_ASSERT(c.size() >= 1); + + if(n > (c.size() - 1)) // If we need to grow in capacity... // (c.size() - 1) == capacity(); we are attempting to reduce function calls. + { + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + eastl::copy(begin(), end(), cTemp.get().begin()); + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. 
+ } + } + + + template + typename ring_buffer::reference + ring_buffer::front() + { + return *mBegin; + } + + + template + typename ring_buffer::const_reference + ring_buffer::front() const + { + return *mBegin; + } + + + template + typename ring_buffer::reference + ring_buffer::back() + { + // return *(end() - 1); // Can't use this because not all iterators support operator-. + + iterator temp(end()); // To do: Find a way to construct this temporary in the return statement. + return *(--temp); // We can do it by making all our containers' iterators support operator-. + } + + + template + typename ring_buffer::const_reference + ring_buffer::back() const + { + // return *(end() - 1); // Can't use this because not all iterators support operator-. + + const_iterator temp(end()); // To do: Find a way to construct this temporary in the return statement. + return *(--temp); // We can do it by making all our containers' iterators support operator-. + } + + + /// A push_back operation on a ring buffer assigns the new value to end. + /// If there is no more space in the buffer, this will result in begin + /// being overwritten and the begin position being moved foward one position. + template + void ring_buffer::push_back(const value_type& value) + { + *mEnd = value; + + if(++mEnd == c.end()) + mEnd = c.begin(); + + if(mEnd == mBegin) + { + if(++mBegin == c.end()) + mBegin = c.begin(); + } + else + ++mSize; + } + + + /// A push_back operation on a ring buffer assigns the new value to end. + /// If there is no more space in the buffer, this will result in begin + /// being overwritten and the begin position being moved foward one position. + template + typename ring_buffer::reference + ring_buffer::push_back() + { + // We don't do the following assignment, as the value at mEnd is already constructed; + // it is merely possibly not default-constructed. However, the spirit of push_back + // is that the user intends to do an assignment or data modification after the + // push_back call. The user can always execute *back() = value_type() if he wants. + //*mEnd = value_type(); + + if(++mEnd == c.end()) + mEnd = c.begin(); + + if(mEnd == mBegin) + { + if(++mBegin == c.end()) + mBegin = c.begin(); + } + else + ++mSize; + + return back(); + } + + + template + void ring_buffer::pop_back() + { + EASTL_ASSERT(mEnd != mBegin); // We assume that size() > 0 and thus that there is something to pop. + + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + --mSize; + } + + + template + void ring_buffer::push_front(const value_type& value) + { + if(EASTL_UNLIKELY(mBegin == c.begin())) + mBegin = c.end(); + + if(--mBegin == mEnd) + { + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + } + else + ++mSize; + + *mBegin = value; + } + + + template + typename ring_buffer::reference + ring_buffer::push_front() + { + if(EASTL_UNLIKELY(mBegin == c.begin())) + mBegin = c.end(); + + if(--mBegin == mEnd) + { + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + } + else + ++mSize; + + // See comments above in push_back for why we don't execute this: + // *mBegin = value_type(); + + return *mBegin; // Same as return front(); + } + + + template + void ring_buffer::pop_front() + { + EASTL_ASSERT(mBegin != mEnd); // We assume that mEnd > mBegin and thus that there is something to pop. 
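+		// Step the begin position forward one slot, wrapping to the container's
+		// begin if it walks off the end. The popped element is merely abandoned,
+		// not destroyed; elements live and die with the container itself.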
+ + if(++mBegin == c.end()) + mBegin = c.begin(); + --mSize; + } + + + template + typename ring_buffer::reference + ring_buffer::operator[](size_type n) + { + // return *(begin() + n); // Can't use this because not all iterators support operator+. + + // This should compile to code that is nearly as efficient as that above. + // The primary difference is the possible generation of a temporary in this case. + iterator temp(begin()); + eastl::advance(temp, n); + return *(temp.mContainerIterator); + } + + + template + typename ring_buffer::const_reference + ring_buffer::operator[](size_type n) const + { + // return *(begin() + n); // Can't use this because not all iterators support operator+. + + // This should compile to code that is nearly as efficient as that above. + // The primary difference is the possible generation of a temporary in this case. + const_iterator temp(begin()); + eastl::advance(temp, n); + return *(temp.mContainerIterator); + } + + + template + typename ring_buffer::iterator + ring_buffer::insert(const_iterator position, const value_type& value) + { + // To consider: It would be faster if we could tell that position was in the first + // half of the container and instead of moving things after the position back, + // we could move things before the position forward. + + iterator afterEnd(end()); + iterator beforeEnd(afterEnd); + + ++afterEnd; + + if(afterEnd.mContainerIterator == mBegin) // If we are at full capacity... + --beforeEnd; + else + push_back(); + + iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator. + eastl::copy_backward(itPosition, beforeEnd, end()); + *itPosition = value; + + return itPosition; + } + + + template + void ring_buffer::insert(const_iterator position, size_type n, const value_type& value) + { + // To do: This can be improved with a smarter version. However, + // this is a little tricky because we need to deal with the case + // whereby n is greater than the size of the container itself. + while(n--) + insert(position, value); + } + + + template + void ring_buffer::insert(const_iterator position, std::initializer_list ilist) + { + insert(position, ilist.begin(), ilist.end()); + } + + + template + template + void ring_buffer::insert(const_iterator position, InputIterator first, InputIterator last) + { + // To do: This can possibly be improved with a smarter version. + // However, this can be tricky if distance(first, last) is greater + // than the size of the container itself. + for(; first != last; ++first, ++position) + insert(position, *first); + } + + + template + typename ring_buffer::iterator + ring_buffer::erase(const_iterator position) + { + iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator. + iterator iNext(itPosition); + + eastl::copy(++iNext, end(), itPosition); + pop_back(); + + return itPosition; + } + + + template + typename ring_buffer::iterator + ring_buffer::erase(const_iterator first, const_iterator last) + { + iterator itFirst(first.mpContainer, first.mContainerIterator); // We merely copy from const_iterator to iterator. + iterator itLast(last.mpContainer, last.mContainerIterator); + + typename iterator::difference_type d = eastl::distance(itFirst, itLast); + + eastl::copy(itLast, end(), itFirst); + + while(d--) // To do: improve this implementation. 
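+			// Each iteration discards one of the d stale slots left at the end
+			// after the copy above shifted [last, end()) down over the erased range.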
+ pop_back(); + + return itFirst; + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase((++last).base(), (++first).base())); + } + + + template + void ring_buffer::clear() + { + // Don't clear the container; we use its valid data for our elements. + mBegin = c.begin(); + mEnd = c.begin(); + mSize = 0; + } + + + template + typename ring_buffer::container_type& + ring_buffer::get_container() + { + return c; + } + + + template + const typename ring_buffer::container_type& + ring_buffer::get_container() const + { + return c; + } + + + template + inline bool ring_buffer::validate() const + { + if(!c.validate()) // This requires that the container implement the validate function. That pretty much + return false; // means that the container is an EASTL container and not a std STL container. + + if(c.empty()) // c must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element. + return false; + + if(size() > capacity()) + return false; + + if((validate_iterator(begin()) & (isf_valid | isf_current)) != (isf_valid | isf_current)) + return false; + + if((validate_iterator(end()) & (isf_valid | isf_current)) != (isf_valid | isf_current)) + return false; + + // Verify that the size calculation is consistent. + size_type n = 0; + for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i) + ++n; + if(n != mSize) + return false; + + return true; + } + + + template + inline int ring_buffer::validate_iterator(const_iterator i) const + { + // To do: Replace this with a more efficient implementation if possible. 
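+		// Linear scan: i is dereferenceable only if it lies in [begin(), end());
+		// end() itself is valid and current but not dereferenceable.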
+ + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const ring_buffer& a, const ring_buffer& b) + { + return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()); + } + + + template + inline bool operator<(const ring_buffer& a, const ring_buffer& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator!=(const ring_buffer& a, const ring_buffer& b) + { + return !(a == b); + } + + + template + inline bool operator>(const ring_buffer& a, const ring_buffer& b) + { + return (b < a); + } + + + template + inline bool operator<=(const ring_buffer& a, const ring_buffer& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const ring_buffer& a, const ring_buffer& b) + { + return !(a < b); + } + + + template + inline void swap(ring_buffer& a, ring_buffer& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/bonus/sort_extra.h b/external/EASTL/include/EASTL/bonus/sort_extra.h new file mode 100644 index 00000000..5f9a0c46 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/sort_extra.h @@ -0,0 +1,204 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////////////// +// This file implements additional sort algorithms beyond the basic set. +// Included here are: +// selection_sort -- Unstable. +// shaker_sort -- Stable. +// bucket_sort -- Stable. +// +////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_SORT_EXTRA_H +#define EASTL_SORT_EXTRA_H + + +#include +#include +#include +#include +#include +#include // For backwards compatibility due to sorts moved from here to sort.h. +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// selection_sort + /// + /// Implements the SelectionSort algorithm. + /// + template + void selection_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare) + { + ForwardIterator iCurrent, iMin; + + for(; first != last; ++first) + { + iCurrent = first; + iMin = iCurrent; + + for(++iCurrent; iCurrent != last; ++iCurrent) + { + if(compare(*iCurrent, *iMin)) + { + EASTL_VALIDATE_COMPARE(!compare(*iMin, *iCurrent)); // Validate that the compare function is sane. 
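+					// Remember the smallest element seen so far in [first, last).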
+ iMin = iCurrent; + } + } + + if(first != iMin) + eastl::iter_swap(first, iMin); + } + } // selection_sort + + template + inline void selection_sort(ForwardIterator first, ForwardIterator last) + { + typedef eastl::less::value_type> Less; + + eastl::selection_sort(first, last, Less()); + } + + + + /// shaker_sort + /// + /// Implements the ShakerSort algorithm, which is a sorting algorithm which + /// improves on bubble_sort by sweeping both from left to right and right + /// to left, resulting in less iteration. + /// + template + void shaker_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare) + { + if(first != last) + { + BidirectionalIterator iCurrent, iNext, iLastModified; + + --last; + + while(first != last) + { + iLastModified = first; + + for(iCurrent = first; iCurrent != last; iCurrent = iNext) + { + iNext = iCurrent; + ++iNext; + + if(compare(*iNext, *iCurrent)) + { + EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane. + iLastModified = iCurrent; + eastl::iter_swap(iCurrent, iNext); + } + } + + last = iLastModified; + + if(first != last) + { + for(iCurrent = last; iCurrent != first; iCurrent = iNext) + { + iNext = iCurrent; + --iNext; + + if(compare(*iCurrent, *iNext)) + { + EASTL_VALIDATE_COMPARE(!compare(*iNext, *iCurrent)); // Validate that the compare function is sane. + iLastModified = iCurrent; + eastl::iter_swap(iNext, iCurrent); + } + } + first = iLastModified; + } + } + } + } // shaker_sort + + template + inline void shaker_sort(BidirectionalIterator first, BidirectionalIterator last) + { + typedef eastl::less::value_type> Less; + + eastl::shaker_sort(first, last, Less()); + } + + + + /// bucket_sort + /// + /// Implements the BucketSort algorithm. + /// + /// Example usage: + /// const size_t kElementRange = 32; + /// vector intArray(1000); + /// + /// for(int i = 0; i < 1000; i++) + /// intArray[i] = rand() % kElementRange; + /// + /// vector< vector > bucketArray(kElementRange); + /// bucket_sort(intArray.begin(), intArray.end(), bucketArray, eastl::hash_use_self()); + /// + template + struct hash_use_self + { + T operator()(const T& x) const + { return x; } + }; + + // Requires buckeyArray to be an array of arrays with a size equal to the range of values + // returned by the hash function. The hash function is required to return a unique value + // for each uniquely sorted element. Usually the way this is done is the elements are + // integers of a limited range (e.g. 0-64) and the hash function returns the element value + // itself. If you had a case where all elements were always even numbers (e.g. 0-128), + // you could use a custom hash function that returns (element value / 2). + // + // The user is required to provide an empty bucketArray to this function. This function returns + // with the bucketArray non-empty. This function doesn't clear the bucketArray because that takes + // time and the user might not need it to be cleared, at least at that time. 
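+	// A minimal sketch of the call pattern (reusing the hypothetical
+	// kElementRange/intArray names from the example above):
+	//     vector< vector<int> > bucketArray(kElementRange);
+	//     bucket_sort(intArray.begin(), intArray.end(), bucketArray,
+	//                 eastl::hash_use_self<int>());
+	//     // bucketArray is left populated; clear it yourself before reuse.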
+ // + template + void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash /*= hash_use_self*/) + { + for(ForwardIterator iInput = first; iInput != last; ++iInput) + bucketArray[hash(*iInput)].push_back(*iInput); + + for(typename ContainerArray::const_iterator iBucket = bucketArray.begin(); iBucket != bucketArray.end(); ++iBucket) + first = eastl::copy((*iBucket).begin(), (*iBucket).end(), first); + } + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/bonus/tuple_vector.h b/external/EASTL/include/EASTL/bonus/tuple_vector.h new file mode 100644 index 00000000..6c238426 --- /dev/null +++ b/external/EASTL/include/EASTL/bonus/tuple_vector.h @@ -0,0 +1,1599 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// tuple_vector is a data container that is designed to abstract and simplify +// the handling of a "structure of arrays" layout of data in memory. In +// particular, it mimics the interface of vector, including functionality to do +// inserts, erases, push_backs, and random-access. It also provides a +// RandomAccessIterator and corresponding functionality, making it compatible +// with most STL (and STL-esque) algorithms such as ranged-for loops, find_if, +// remove_if, or sort. + +// When used or applied properly, this container can improve performance of +// some algorithms through cache-coherent data accesses or allowing for +// sensible SIMD programming, while keeping the structure of a single +// container, to permit a developer to continue to use existing algorithms in +// STL and the like. +// +// Consult doc/Bonus/tuple_vector_readme.md for more information. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_TUPLEVECTOR_H +#define EASTL_TUPLEVECTOR_H + +#include +#include +#include +#include +#include +#include +#if EASTL_EXCEPTIONS_ENABLED +#include +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +EA_DISABLE_VC_WARNING(4244) // warning C4244: 'conversion from '___' to '___', possible loss of data +EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted +EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted +EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated + +namespace eastl +{ + /// EASTL_TUPLE_VECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_TUPLE_VECTOR_DEFAULT_NAME + #define EASTL_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " tuple-vector" // Unless the user overrides something, this is "EASTL tuple-vector". 
+ #endif + + + /// EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR + #define EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_TUPLE_VECTOR_DEFAULT_NAME) + #endif + +namespace TupleVecInternal +{ + +// forward declarations +template +struct tuplevec_element; + +template +using tuplevec_element_t = typename tuplevec_element::type; + +template +struct TupleTypes {}; + +template +class TupleVecImpl; + +template +struct TupleRecurser; + +template +struct TupleIndexRecurser; + +template +struct TupleVecLeaf; + +template +struct TupleVecIter; + +// tuplevec_element helper to be able to isolate a type given an index +template +struct tuplevec_element +{ + static_assert(I != I, "tuplevec_element index out of range"); +}; + +template +struct tuplevec_element<0, T, Ts...> +{ + tuplevec_element() = delete; // tuplevec_element should only be used for compile-time assistance, and never be instantiated + typedef T type; +}; + +template +struct tuplevec_element +{ + typedef tuplevec_element_t type; +}; + +// attempt to isolate index given a type +template +struct tuplevec_index +{ +}; + +template +struct tuplevec_index> +{ + typedef void DuplicateTypeCheck; + tuplevec_index() = delete; // tuplevec_index should only be used for compile-time assistance, and never be instantiated + static const eastl_size_t index = 0; +}; + +template +struct tuplevec_index> +{ + typedef int DuplicateTypeCheck; + static_assert(is_void>::DuplicateTypeCheck>::value, "duplicate type T in tuple_vector::get(); unique types must be provided in declaration, or only use get()"); + + static const eastl_size_t index = 0; +}; + +template +struct tuplevec_index> +{ + typedef typename tuplevec_index>::DuplicateTypeCheck DuplicateTypeCheck; + static const eastl_size_t index = tuplevec_index>::index + 1; +}; + +template +struct tuplevec_index> : public tuplevec_index> +{ +}; + + +// helper to calculate the layout of the allocations for the tuple of types (esp. to take alignment into account) +template <> +struct TupleRecurser<> +{ + typedef eastl_size_t size_type; + + // This class should never be instantiated. This is just a helper for working with static functions when anonymous functions don't work + // and provide some other utilities + TupleRecurser() = delete; + + static EA_CONSTEXPR size_type GetTotalAlignment() + { + return 0; + } + + static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset) + { + EA_UNUSED(capacity); + return offset; + } + + template + static pair DoAllocate(TupleVecImpl &vec, void** ppNewLeaf, size_type capacity, size_type offset) + { + EA_UNUSED(ppNewLeaf); + + // If n is zero, then we allocate no memory and just return NULL. + // This is fine, as our default ctor initializes with NULL pointers. + size_type alignment = TupleRecurser::GetTotalAlignment(); + void* ptr = capacity ? 
allocate_memory(vec.get_allocator(), offset, alignment, 0) : nullptr; + + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_t)ptr & (alignment - 1)) != 0) + { + EASTL_FAIL_MSG("tuple_vector::DoAllocate -- memory not alignment at requested alignment"); + } + #endif + + return make_pair(ptr, offset); + } + + template + static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset) + { + EA_UNUSED(vec); + EA_UNUSED(pData); + EA_UNUSED(capacity); + EA_UNUSED(offset); + } +}; + +template +struct TupleRecurser : TupleRecurser +{ + typedef eastl_size_t size_type; + + static EA_CONSTEXPR size_type GetTotalAlignment() + { + return max(static_cast(alignof(T)), TupleRecurser::GetTotalAlignment()); + } + + static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset) + { + return TupleRecurser::GetTotalAllocationSize(capacity, CalculateAllocationSize(offset, capacity)); + } + + template + static pair DoAllocate(TupleVecImpl &vec, void** ppNewLeaf, size_type capacity, size_type offset) + { + size_type allocationOffset = CalculatAllocationOffset(offset); + size_type allocationSize = CalculateAllocationSize(offset, capacity); + pair allocation = TupleRecurser::template DoAllocate( + vec, ppNewLeaf, capacity, allocationSize); + ppNewLeaf[I] = (void*)((uintptr_t)(allocation.first) + allocationOffset); + return allocation; + } + + template + static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset) + { + size_type allocationOffset = CalculatAllocationOffset(offset); + size_type allocationSize = CalculateAllocationSize(offset, capacity); + vec.TupleVecLeaf::mpData = (T*)((uintptr_t)pData + allocationOffset); + TupleRecurser::template SetNewData(vec, pData, capacity, allocationSize); + } + +private: + static EA_CONSTEXPR size_type CalculateAllocationSize(size_type offset, size_type capacity) + { + return CalculatAllocationOffset(offset) + sizeof(T) * capacity; + } + + static EA_CONSTEXPR size_type CalculatAllocationOffset(size_type offset) { return (offset + alignof(T) - 1) & (~alignof(T) + 1); } +}; + +template +struct TupleVecLeaf +{ + typedef eastl_size_t size_type; + + void DoUninitializedMoveAndDestruct(const size_type begin, const size_type end, T* pDest) + { + T* pBegin = mpData + begin; + T* pEnd = mpData + end; + eastl::uninitialized_move_if_noexcept(pBegin, pEnd, pDest); + eastl::destruct(pBegin, pEnd); + } + + void DoInsertAndFill(size_type pos, size_type n, size_type numElements, const T& arg) + { + T* pDest = mpData + pos; + T* pDataEnd = mpData + numElements; + const T temp = arg; + const size_type nExtra = (numElements - pos); + if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + eastl::uninitialized_move(pDataEnd - n, pDataEnd, pDataEnd); + eastl::move_backward(pDest, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues. 
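+			// The vacated slots [pDest, pDest + n) now receive copies of the new value.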
+ eastl::fill(pDest, pDest + n, temp); + } + else + { + eastl::uninitialized_fill_n(pDataEnd, n - nExtra, temp); + eastl::uninitialized_move(pDest, pDataEnd, pDataEnd + n - nExtra); + eastl::fill(pDest, pDataEnd, temp); + } + } + + void DoInsertRange(T* pSrcBegin, T* pSrcEnd, T* pDestBegin, size_type numDataElements) + { + size_type pos = static_cast(pDestBegin - mpData); + size_type n = static_cast(pSrcEnd - pSrcBegin); + T* pDataEnd = mpData + numDataElements; + const size_type nExtra = numDataElements - pos; + if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + eastl::uninitialized_move(pDataEnd - n, pDataEnd, pDataEnd); + eastl::move_backward(pDestBegin, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues. + eastl::copy(pSrcBegin, pSrcEnd, pDestBegin); + } + else + { + eastl::uninitialized_copy(pSrcEnd - (n - nExtra), pSrcEnd, pDataEnd); + eastl::uninitialized_move(pDestBegin, pDataEnd, pDataEnd + n - nExtra); + eastl::copy(pSrcBegin, pSrcEnd - (n - nExtra), pDestBegin); + } + } + + void DoInsertValue(size_type pos, size_type numElements, T&& arg) + { + T* pDest = mpData + pos; + T* pDataEnd = mpData + numElements; + + eastl::uninitialized_move(pDataEnd - 1, pDataEnd, pDataEnd); + eastl::move_backward(pDest, pDataEnd - 1, pDataEnd); // We need move_backward because of potential overlap issues. + eastl::destruct(pDest); + ::new (pDest) T(eastl::forward(arg)); + } + + T* mpData = nullptr; +}; + +// swallow allows for parameter pack expansion of arguments as means of expanding operations performed +// if a void function is used for operation expansion, it should be wrapped in (..., 0) so that the compiler +// thinks it has a parameter to pass into the function +template +void swallow(Ts&&...) { } + +inline bool variadicAnd(bool cond) { return cond; } + +inline bool variadicAnd(bool cond, bool conds...) { return cond && variadicAnd(conds); } + +// Helper struct to check for strict compatibility between two iterators, whilst still allowing for +// conversion between TupleVecImpl::iterator and TupleVecImpl::const_iterator. +template +struct TupleVecIterCompatibleImpl : public false_type { }; + +template<> +struct TupleVecIterCompatibleImpl, TupleTypes<>> : public true_type { }; + +template +struct TupleVecIterCompatibleImpl, TupleTypes> : public integral_constant, TupleTypes>::value && + is_same::type, typename remove_const::type>::value > +{ }; + +template +struct TupleVecIterCompatible; + +template +struct TupleVecIterCompatible, TupleTypes> : + public TupleVecIterCompatibleImpl, TupleTypes> +{ }; + +// The Iterator operates by storing a persistent index internally, +// and resolving the tuple of pointers to the various parts of the original tupleVec when dereferenced. 
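+// (Concretely: each dereference rebuilds a tuple of references from the cached
+// per-member data pointers and the current index.)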
+// While resolving the tuple is a non-zero operation, it consistently generated better code than the alternative of +// storing - and harmoniously updating on each modification - a full tuple of pointers to the tupleVec's data +template +struct TupleVecIter, Ts...> +{ +private: + typedef TupleVecIter, Ts...> this_type; + typedef eastl_size_t size_type; + + template + friend struct TupleVecIter; + + template + friend class TupleVecImpl; + + template + friend class move_iterator; +public: + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef tuple value_type; + typedef eastl_size_t difference_type; + typedef tuple pointer; + typedef tuple reference; + + TupleVecIter() = default; + + template + TupleVecIter(VecImplType* tupleVec, size_type index) + : mIndex(index) + , mpData{(void*)tupleVec->TupleVecLeaf::mpData...} + { } + + template , TupleTypes>::value, bool>::type> + TupleVecIter(const TupleVecIter& other) + : mIndex(other.mIndex) + , mpData{other.mpData[Indices]...} + { + } + + bool operator==(const TupleVecIter& other) const { return mIndex == other.mIndex && mpData[0] == other.mpData[0]; } + bool operator!=(const TupleVecIter& other) const { return mIndex != other.mIndex || mpData[0] != other.mpData[0]; } + reference operator*() const { return MakeReference(); } + + this_type& operator++() { ++mIndex; return *this; } + this_type operator++(int) + { + this_type temp = *this; + ++mIndex; + return temp; + } + + this_type& operator--() { --mIndex; return *this; } + this_type operator--(int) + { + this_type temp = *this; + --mIndex; + return temp; + } + + this_type& operator+=(difference_type n) { mIndex += n; return *this; } + this_type operator+(difference_type n) const + { + this_type temp = *this; + return temp += n; + } + friend this_type operator+(difference_type n, const this_type& rhs) + { + this_type temp = rhs; + return temp += n; + } + + this_type& operator-=(difference_type n) { mIndex -= n; return *this; } + this_type operator-(difference_type n) const + { + this_type temp = *this; + return temp -= n; + } + friend this_type operator-(difference_type n, const this_type& rhs) + { + this_type temp = rhs; + return temp -= n; + } + + difference_type operator-(const this_type& rhs) const { return mIndex - rhs.mIndex; } + bool operator<(const this_type& rhs) const { return mIndex < rhs.mIndex; } + bool operator>(const this_type& rhs) const { return mIndex > rhs.mIndex; } + bool operator>=(const this_type& rhs) const { return mIndex >= rhs.mIndex; } + bool operator<=(const this_type& rhs) const { return mIndex <= rhs.mIndex; } + + reference operator[](const size_type n) const + { + return *(*this + n); + } + +private: + + value_type MakeValue() const + { + return value_type(((Ts*)mpData[Indices])[mIndex]...); + } + + reference MakeReference() const + { + return reference(((Ts*)mpData[Indices])[mIndex]...); + } + + pointer MakePointer() const + { + return pointer(&((Ts*)mpData[Indices])[mIndex]...); + } + + size_type mIndex = 0; + const void* mpData[sizeof...(Ts)]; +}; + +// TupleVecImpl +template +class TupleVecImpl, Ts...> : public TupleVecLeaf... 
+{ + typedef Allocator allocator_type; + typedef index_sequence index_sequence_type; + typedef TupleVecImpl this_type; + typedef TupleVecImpl const_this_type; + +public: + typedef TupleVecInternal::TupleVecIter iterator; + typedef TupleVecInternal::TupleVecIter const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef eastl_size_t size_type; + typedef eastl::tuple value_tuple; + typedef eastl::tuple reference_tuple; + typedef eastl::tuple const_reference_tuple; + typedef eastl::tuple ptr_tuple; + typedef eastl::tuple const_ptr_tuple; + typedef eastl::tuple rvalue_tuple; + + TupleVecImpl() + : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + {} + + TupleVecImpl(const allocator_type& allocator) + : mDataSizeAndAllocator(0, allocator) + {} + + TupleVecImpl(this_type&& x) + : mDataSizeAndAllocator(0, eastl::move(x.get_allocator())) + { + swap(x); + } + + TupleVecImpl(this_type&& x, const Allocator& allocator) + : mDataSizeAndAllocator(0, allocator) + { + if (get_allocator() == x.get_allocator()) // If allocators are equivalent, then we can safely swap member-by-member + { + swap(x); + } + else + { + this_type temp(eastl::move(*this)); + temp.swap(x); + } + } + + TupleVecImpl(const this_type& x) + : mDataSizeAndAllocator(0, x.get_allocator()) + { + DoInitFromIterator(x.begin(), x.end()); + } + + template + TupleVecImpl(const TupleVecImpl& x, const Allocator& allocator) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromIterator(x.begin(), x.end()); + } + + template + TupleVecImpl(move_iterator begin, move_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromIterator(begin, end); + } + + TupleVecImpl(const_iterator begin, const_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator ) + { + DoInitFromIterator(begin, end); + } + + TupleVecImpl(size_type n, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitDefaultFill(n); + } + + TupleVecImpl(size_type n, const Ts&... args) + : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + { + DoInitFillArgs(n, args...); + } + + TupleVecImpl(size_type n, const Ts&... 
args, const allocator_type& allocator) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFillArgs(n, args...); + } + + TupleVecImpl(size_type n, const_reference_tuple tup, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFillTuple(n, tup); + } + + TupleVecImpl(const value_tuple* first, const value_tuple* last, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromTupleArray(first, last); + } + + TupleVecImpl(std::initializer_list iList, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromTupleArray(iList.begin(), iList.end()); + } + +protected: + // ctor to provide a pre-allocated field of data that the container will own, specifically for fixed_tuple_vector + TupleVecImpl(const allocator_type& allocator, void* pData, size_type capacity, size_type dataSize) + : mpData(pData), mNumCapacity(capacity), mDataSizeAndAllocator(dataSize, allocator) + { + TupleRecurser::template SetNewData(*this, mpData, mNumCapacity, 0); + } + +public: + ~TupleVecImpl() + { + swallow((eastl::destruct(TupleVecLeaf::mpData, TupleVecLeaf::mpData + mNumElements), 0)...); + if (mpData) + EASTLFree(get_allocator(), mpData, internalDataSize()); + } + + void assign(size_type n, const Ts&... args) + { + if (n > mNumCapacity) + { + this_type temp(n, args..., get_allocator()); // We have little choice but to reallocate with new memory. + swap(temp); + } + else if (n > mNumElements) // If n > mNumElements ... + { + size_type oldNumElements = mNumElements; + swallow((eastl::fill(TupleVecLeaf::mpData, TupleVecLeaf::mpData + oldNumElements, args), 0)...); + swallow((eastl::uninitialized_fill(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + n, args), 0)...); + mNumElements = n; + } + else // else 0 <= n <= mNumElements + { + swallow((eastl::fill(TupleVecLeaf::mpData, TupleVecLeaf::mpData + n, args), 0)...); + erase(begin() + n, end()); + } + } + + void assign(const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::assign -- invalid iterator pair"); +#endif + size_type newNumElements = last - first; + if (newNumElements > mNumCapacity) + { + this_type temp(first, last, get_allocator()); + swap(temp); + } + else + { + const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...}; + size_type firstIdx = first.mIndex; + size_type lastIdx = last.mIndex; + if (newNumElements > mNumElements) // If n > mNumElements ... 
+ { + size_type oldNumElements = mNumElements; + swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements, + TupleVecLeaf::mpData), 0)...); + swallow((eastl::uninitialized_copy((Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements, + (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + oldNumElements), 0)...); + mNumElements = newNumElements; + } + else // else 0 <= n <= mNumElements + { + swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData), 0)...); + erase(begin() + newNumElements, end()); + } + } + } + + void assign(const value_tuple* first, const value_tuple* last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr)) + EASTL_FAIL_MSG("tuple_vector::assign from tuple array -- invalid ptrs"); +#endif + size_type newNumElements = static_cast(last - first); + if (newNumElements > mNumCapacity) + { + this_type temp(first, last, get_allocator()); + swap(temp); + } + else + { + if (newNumElements > mNumElements) // If n > mNumElements ... + { + size_type oldNumElements = mNumElements; + + DoCopyFromTupleArray(begin(), begin() + oldNumElements, first); + DoUninitializedCopyFromTupleArray(begin() + oldNumElements, begin() + newNumElements, first + oldNumElements); + mNumElements = newNumElements; + } + else // else 0 <= n <= mNumElements + { + DoCopyFromTupleArray(begin(), begin() + newNumElements, first); + erase(begin() + newNumElements, end()); + } + } + } + + reference_tuple push_back() + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts()...); + return back(); + } + + void push_back(const Ts&... args) + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts(args)...); + } + + void push_back_uninitialized() + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + } + + reference_tuple emplace_back(Ts&&... args) + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts(eastl::forward(args))...); + return back(); + } + + iterator emplace(const_iterator pos, Ts&&... 
args) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::emplace -- invalid iterator"); +#endif + size_type firstIdx = pos - cbegin(); + size_type oldNumElements = mNumElements; + size_type newNumElements = mNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || firstIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + firstIdx + 1), 0)...); + swallow(::new ((Ts*)ppNewLeaf[Indices] + firstIdx) Ts(eastl::forward(args))...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertValue(firstIdx, oldNumElements, eastl::forward(args)), 0)...); + } + } + else + { + swallow(::new (TupleVecLeaf::mpData + oldNumElements) Ts(eastl::forward(args))...); + } + return begin() + firstIdx; + } + + iterator insert(const_iterator pos, size_type n, const Ts&... args) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator"); +#endif + size_type firstIdx = pos - cbegin(); + size_type lastIdx = firstIdx + n; + size_type oldNumElements = mNumElements; + size_type newNumElements = mNumElements + n; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || firstIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + lastIdx), 0)...); + swallow((eastl::uninitialized_fill((Ts*)ppNewLeaf[Indices] + firstIdx, (Ts*)ppNewLeaf[Indices] + lastIdx, args), 0)...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertAndFill(firstIdx, n, oldNumElements, args), 0)...); + } + } + else + { + swallow((eastl::uninitialized_fill(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + newNumElements, args), 0)...); + } + return begin() + firstIdx; + } + + iterator insert(const_iterator pos, const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator"); + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator pair"); +#endif 
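+		// Three cases follow: reallocate and splice the source range in while relocating
+		// the old elements; shift the tail with DoInsertRange for an insertion in the
+		// middle; or copy-construct directly into uninitialized memory when appending
+		// at the end.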
+ size_type posIdx = pos - cbegin(); + size_type firstIdx = first.mIndex; + size_type lastIdx = last.mIndex; + size_type numToInsert = last - first; + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + numToInsert; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...}; + if (newNumElements > oldNumCapacity || posIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...); + swallow((eastl::uninitialized_copy((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + lastIdx, + (Ts*)ppNewLeaf[Indices] + posIdx), 0)...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertRange( + (Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + posIdx, oldNumElements), 0)...); + } + } + else + { + swallow((eastl::uninitialized_copy((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + posIdx), 0)...); + } + return begin() + posIdx; + } + + iterator insert(const_iterator pos, const value_tuple* first, const value_tuple* last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator"); + if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid source pointers"); +#endif + size_type posIdx = pos - cbegin(); + size_type numToInsert = static_cast(last - first); + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + numToInsert; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || posIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = eastl::max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...); + + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + // Do this after mpData is updated so that we can use new iterators + DoUninitializedCopyFromTupleArray(begin() + posIdx, begin() + posIdx + numToInsert, first); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + const size_type nExtra = oldNumElements - posIdx; + void* ppDataEnd[sizeof...(Ts)] = { (void*)(TupleVecLeaf::mpData + 
oldNumElements)... }; + void* ppDataBegin[sizeof...(Ts)] = { (void*)(TupleVecLeaf::mpData + posIdx)... }; + if (numToInsert < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + swallow((eastl::uninitialized_move((Ts*)ppDataEnd[Indices] - numToInsert, + (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices]), 0)...); + // We need move_backward because of potential overlap issues. + swallow((eastl::move_backward((Ts*)ppDataBegin[Indices], + (Ts*)ppDataEnd[Indices] - numToInsert, (Ts*)ppDataEnd[Indices]), 0)...); + + DoCopyFromTupleArray(pos, pos + numToInsert, first); + } + else + { + size_type numToInitialize = numToInsert - nExtra; + swallow((eastl::uninitialized_move((Ts*)ppDataBegin[Indices], + (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices] + numToInitialize), 0)...); + + DoCopyFromTupleArray(pos, begin() + oldNumElements, first); + DoUninitializedCopyFromTupleArray(begin() + oldNumElements, pos + numToInsert, first + nExtra); + } + } + } + else + { + DoUninitializedCopyFromTupleArray(pos, pos + numToInsert, first); + } + return begin() + posIdx; + } + + iterator erase(const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(first) == isf_none || validate_iterator(last) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator"); + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair"); +#endif + if (first != last) + { + size_type firstIdx = first - cbegin(); + size_type lastIdx = last - cbegin(); + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements - (lastIdx - firstIdx); + mNumElements = newNumElements; + swallow((eastl::move(TupleVecLeaf::mpData + lastIdx, + TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + firstIdx), 0)...); + swallow((eastl::destruct(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + return begin() + first.mIndex; + } + + iterator erase_unsorted(const_iterator pos) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::erase_unsorted -- invalid iterator"); +#endif + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements - 1; + mNumElements = newNumElements; + swallow((eastl::move(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + (pos - begin())), 0)...); + swallow((eastl::destruct(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements), 0)...); + return begin() + pos.mIndex; + } + + void resize(size_type n) + { + size_type oldNumElements = mNumElements; + size_type oldNumCapacity = mNumCapacity; + mNumElements = n; + if (n > oldNumElements) + { + if (n > oldNumCapacity) + { + DoReallocate(oldNumElements, eastl::max(GetNewCapacity(oldNumCapacity), n)); + } + swallow((eastl::uninitialized_value_construct_n(TupleVecLeaf::mpData + oldNumElements, n - oldNumElements), 0)...); + } + else + { + swallow((eastl::destruct(TupleVecLeaf::mpData + n, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + } + + void resize(size_type n, const Ts&... 
args) + { + size_type oldNumElements = mNumElements; + size_type oldNumCapacity = mNumCapacity; + mNumElements = n; + if (n > oldNumElements) + { + if (n > oldNumCapacity) + { + DoReallocate(oldNumElements, eastl::max(GetNewCapacity(oldNumCapacity), n)); + } + swallow((eastl::uninitialized_fill(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + n, args), 0)...); + } + else + { + swallow((eastl::destruct(TupleVecLeaf::mpData + n, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + } + + void reserve(size_type n) + { + DoConditionalReallocate(mNumElements, mNumCapacity, n); + } + + void shrink_to_fit() + { + this_type temp(move_iterator(begin()), move_iterator(end()), get_allocator()); + swap(temp); + } + + void clear() EA_NOEXCEPT + { + size_type oldNumElements = mNumElements; + mNumElements = 0; + swallow((eastl::destruct(TupleVecLeaf::mpData, TupleVecLeaf::mpData + oldNumElements), 0)...); + } + + void pop_back() + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements <= 0)) + EASTL_FAIL_MSG("tuple_vector::pop_back -- container is empty"); +#endif + size_type oldNumElements = mNumElements--; + swallow((eastl::destruct(TupleVecLeaf::mpData + oldNumElements - 1, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + + void swap(this_type& x) + { + swallow((eastl::swap(TupleVecLeaf::mpData, x.TupleVecLeaf::mpData), 0)...); + eastl::swap(mpData, x.mpData); + eastl::swap(mNumElements, x.mNumElements); + eastl::swap(mNumCapacity, x.mNumCapacity); + eastl::swap(get_allocator(), x.get_allocator()); + eastl::swap(internalDataSize(), x.internalDataSize()); + } + + void assign(size_type n, const_reference_tuple tup) { assign(n, eastl::get(tup)...); } + void assign(std::initializer_list iList) { assign(iList.begin(), iList.end()); } + + void push_back(Ts&&... args) { emplace_back(eastl::forward(args)...); } + void push_back(const_reference_tuple tup) { push_back(eastl::get(tup)...); } + void push_back(rvalue_tuple tup) { emplace_back(eastl::forward(eastl::get(tup))...); } + + void emplace_back(rvalue_tuple tup) { emplace_back(eastl::forward(eastl::get(tup))...); } + void emplace(const_iterator pos, rvalue_tuple tup) { emplace(pos, eastl::forward(eastl::get(tup))...); } + + iterator insert(const_iterator pos, const Ts&... args) { return insert(pos, 1, args...); } + iterator insert(const_iterator pos, Ts&&... 
args) { return emplace(pos, eastl::forward(args)...); } + iterator insert(const_iterator pos, rvalue_tuple tup) { return emplace(pos, eastl::forward(eastl::get(tup))...); } + iterator insert(const_iterator pos, const_reference_tuple tup) { return insert(pos, eastl::get(tup)...); } + iterator insert(const_iterator pos, size_type n, const_reference_tuple tup) { return insert(pos, n, eastl::get(tup)...); } + iterator insert(const_iterator pos, std::initializer_list iList) { return insert(pos, iList.begin(), iList.end()); } + + iterator erase(const_iterator pos) { return erase(pos, pos + 1); } + reverse_iterator erase(const_reverse_iterator pos) { return reverse_iterator(erase((pos + 1).base(), (pos).base())); } + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) { return reverse_iterator(erase((last).base(), (first).base())); } + reverse_iterator erase_unsorted(const_reverse_iterator pos) { return reverse_iterator(erase_unsorted((pos + 1).base())); } + + void resize(size_type n, const_reference_tuple tup) { resize(n, eastl::get(tup)...); } + + bool empty() const EA_NOEXCEPT { return mNumElements == 0; } + size_type size() const EA_NOEXCEPT { return mNumElements; } + size_type capacity() const EA_NOEXCEPT { return mNumCapacity; } + + iterator begin() EA_NOEXCEPT { return iterator(this, 0); } + const_iterator begin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); } + const_iterator cbegin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); } + + iterator end() EA_NOEXCEPT { return iterator(this, size()); } + const_iterator end() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); } + const_iterator cend() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); } + + reverse_iterator rbegin() EA_NOEXCEPT { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); } + const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); } + + reverse_iterator rend() EA_NOEXCEPT { return reverse_iterator(begin()); } + const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); } + const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); } + + ptr_tuple data() EA_NOEXCEPT { return ptr_tuple(TupleVecLeaf::mpData...); } + const_ptr_tuple data() const EA_NOEXCEPT { return const_ptr_tuple(TupleVecLeaf::mpData...); } + + reference_tuple at(size_type n) + { +#if EASTL_EXCEPTIONS_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + throw std::out_of_range("tuple_vector::at -- out of range"); +#elif EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + EASTL_FAIL_MSG("tuple_vector::at -- out of range"); +#endif + return reference_tuple(*(TupleVecLeaf::mpData + n)...); + } + + const_reference_tuple at(size_type n) const + { +#if EASTL_EXCEPTIONS_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + throw std::out_of_range("tuple_vector::at -- out of range"); +#elif EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + EASTL_FAIL_MSG("tuple_vector::at -- out of range"); +#endif + return const_reference_tuple(*(TupleVecLeaf::mpData + n)...); + } + + reference_tuple operator[](size_type n) { return at(n); } + const_reference_tuple operator[](size_type n) const { return at(n); } + + reference_tuple front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We 
don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::front -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(0); + } + + const_reference_tuple front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::front -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(0); + } + + reference_tuple back() + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::back -- empty vector"); + #endif + + return at(size() - 1); + } + + const_reference_tuple back() const + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::back -- empty vector"); + #endif + + return at(size() - 1); + } + + template + tuplevec_element_t* get() + { + typedef tuplevec_element_t Element; + return TupleVecLeaf::mpData; + } + template + const tuplevec_element_t* get() const + { + typedef tuplevec_element_t Element; + return TupleVecLeaf::mpData; + } + + template + T* get() + { + typedef tuplevec_index> Index; + return TupleVecLeaf::mpData; + } + template + const T* get() const + { + typedef tuplevec_index> Index; + return TupleVecLeaf::mpData; + } + + this_type& operator=(const this_type& other) + { + if (this != &other) + { + clear(); + assign(other.begin(), other.end()); + } + return *this; + } + + this_type& operator=(this_type&& other) + { + if (this != &other) + { + swap(other); + } + return *this; + } + + this_type& operator=(std::initializer_list iList) + { + assign(iList.begin(), iList.end()); + return *this; + } + + bool validate() const EA_NOEXCEPT + { + if (mNumElements > mNumCapacity) + return false; + if (!(variadicAnd(mpData <= TupleVecLeaf::mpData...))) + return false; + void* pDataEnd = (void*)((uintptr_t)mpData + internalDataSize()); + if (!(variadicAnd(pDataEnd >= TupleVecLeaf::mpData...))) + return false; + return true; + } + + int validate_iterator(const_iterator iter) const EA_NOEXCEPT + { + if (!(variadicAnd(iter.mpData[Indices] == TupleVecLeaf::mpData...))) + return isf_none; + if (iter.mIndex < mNumElements) + return (isf_valid | isf_current | isf_can_dereference); + if (iter.mIndex <= mNumElements) + return (isf_valid | isf_current); + return isf_none; + } + + static bool validate_iterator_pair(const_iterator first, const_iterator last) EA_NOEXCEPT + { + return (first.mIndex <= last.mIndex) && variadicAnd(first.mpData[Indices] == last.mpData[Indices]...); + } + + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'unwrap_iterator': was declared deprecated + + template ::value, bool>::type> + EASTL_REMOVE_AT_2024_SEPT int validate_iterator(Iterator iter) const EA_NOEXCEPT { return validate_iterator(unwrap_iterator(iter)); } + + template ::value, bool>::type> + EASTL_REMOVE_AT_2024_SEPT static bool validate_iterator_pair(Iterator first, Iterator last) EA_NOEXCEPT { return validate_iterator_pair(unwrap_iterator(first), unwrap_iterator(last)); } + + EASTL_INTERNAL_RESTORE_DEPRECATED() + + allocator_type& get_allocator() EA_NOEXCEPT { return mDataSizeAndAllocator.second(); } + const allocator_type& get_allocator() const EA_NOEXCEPT { return mDataSizeAndAllocator.second(); } + + void set_allocator(const 
allocator_type& alloc) { mDataSizeAndAllocator.second() = alloc; }
+
+protected:
+
+	void* mpData = nullptr;
+	size_type mNumElements = 0;
+	size_type mNumCapacity = 0;
+
+	compressed_pair mDataSizeAndAllocator;
+
+	size_type& internalDataSize() EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+	size_type const& internalDataSize() const EA_NOEXCEPT { return mDataSizeAndAllocator.first(); }
+
+	friend struct TupleRecurser<>;
+	template
+	friend struct TupleRecurser;
+
+	template
+	void DoInitFromIterator(move_iterator begin, move_iterator end)
+	{
+#if EASTL_ASSERT_ENABLED
+		if (EASTL_UNLIKELY(!validate_iterator_pair(begin.base(), end.base())))
+			EASTL_FAIL_MSG("tuple_vector::DoInitFromIterator -- invalid iterator pair");
+#endif
+		size_type newNumElements = (size_type)(end - begin);
+		const void* ppOtherData[sizeof...(Ts)] = { begin.base().mpData[Indices]... };
+		size_type beginIdx = begin.base().mIndex;
+		size_type endIdx = end.base().mIndex;
+		DoConditionalReallocate(0, mNumCapacity, newNumElements);
+		mNumElements = newNumElements;
+		swallow((eastl::uninitialized_move(eastl::move_iterator((Ts*)(ppOtherData[Indices]) + beginIdx),
+		                                   eastl::move_iterator((Ts*)(ppOtherData[Indices]) + endIdx),
+		                                   TupleVecLeaf::mpData), 0)...);
+	}
+
+	void DoInitFromIterator(const_iterator begin, const_iterator end)
+	{
+#if EASTL_ASSERT_ENABLED
+		if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end)))
+			EASTL_FAIL_MSG("tuple_vector::DoInitFromIterator -- invalid iterator pair");
+#endif
+		size_type newNumElements = (size_type)(end - begin);
+		const void* ppOtherData[sizeof...(Ts)] = { begin.mpData[Indices]... };
+		size_type beginIdx = begin.mIndex;
+		size_type endIdx = end.mIndex;
+		DoConditionalReallocate(0, mNumCapacity, newNumElements);
+		mNumElements = newNumElements;
+		swallow((eastl::uninitialized_copy((Ts*)(ppOtherData[Indices]) + beginIdx,
+		                                   (Ts*)(ppOtherData[Indices]) + endIdx,
+		                                   TupleVecLeaf::mpData), 0)...);
+	}
+
+	void DoInitFillTuple(size_type n, const_reference_tuple tup) { DoInitFillArgs(n, eastl::get(tup)...); }
+
+	void DoInitFillArgs(size_type n, const Ts&... args)
+	{
+		DoConditionalReallocate(0, mNumCapacity, n);
+		mNumElements = n;
+		swallow((eastl::uninitialized_fill(TupleVecLeaf::mpData, TupleVecLeaf::mpData + n, args), 0)...);
+	}
+
+	void DoInitDefaultFill(size_type n)
+	{
+		DoConditionalReallocate(0, mNumCapacity, n);
+		mNumElements = n;
+		swallow((eastl::uninitialized_value_construct_n(TupleVecLeaf::mpData, n), 0)...);
+	}
+
+	void DoInitFromTupleArray(const value_tuple* first, const value_tuple* last)
+	{
+#if EASTL_ASSERT_ENABLED
+		if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr))
+			EASTL_FAIL_MSG("tuple_vector::ctor from tuple array -- invalid ptrs");
+#endif
+		size_type newNumElements = static_cast(last - first);
+		DoConditionalReallocate(0, mNumCapacity, newNumElements);
+		mNumElements = newNumElements;
+		DoUninitializedCopyFromTupleArray(begin(), end(), first);
+	}
+
+	void DoCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+	{
+		// assign to constructed region
+		while (destPos < destEnd)
+		{
+			*destPos = *srcTuple;
+			++destPos;
+			++srcTuple;
+		}
+	}
+
+	void DoUninitializedCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple)
+	{
+		// placement-new/copy-ctor to unconstructed regions
+		while (destPos < destEnd)
+		{
+			swallow(::new(eastl::get(destPos.MakePointer())) Ts(eastl::get(*srcTuple))...);
+			++destPos;
+			++srcTuple;
+		}
+	}
+
+	// Try to grow the size of the container "naturally" given the number of elements being used
+	void DoGrow(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+	{
+		if (requiredCapacity > oldNumCapacity)
+			DoReallocate(oldNumElements, GetNewCapacity(requiredCapacity));
+	}
+
+	// Reallocate to the newCapacity (IFF it's actually larger, though)
+	void DoConditionalReallocate(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity)
+	{
+		if (requiredCapacity > oldNumCapacity)
+			DoReallocate(oldNumElements, requiredCapacity);
+	}
+
+	void DoReallocate(size_type oldNumElements, size_type requiredCapacity)
+	{
+		void* ppNewLeaf[sizeof...(Ts)];
+		pair allocation = TupleRecurser::template DoAllocate(
+			*this, ppNewLeaf, requiredCapacity, 0);
+		swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct(0, oldNumElements, (Ts*)ppNewLeaf[Indices]), 0)...);
+		swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...);
+
+		EASTLFree(get_allocator(), mpData, internalDataSize());
+		mpData = allocation.first;
+		mNumCapacity = requiredCapacity;
+		internalDataSize() = allocation.second;
+	}
+
+	size_type GetNewCapacity(size_type oldNumCapacity)
+	{
+		return (oldNumCapacity > 0) ? (2 * oldNumCapacity) : 1;
+	}
+};
+
+} // namespace TupleVecInternal
+
+// Move_iterator specialization for TupleVecIter.
+// An rvalue reference of a move_iterator would normally be "tuple<Ts...> &&" whereas
+// what we actually want is "tuple<Ts&&...>". This specialization gives us that.
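+// For example, assuming a tuple_vector<int, eastl::string> named v, dereferencing a
+// move_iterator built over v.begin() yields a tuple of rvalue references to the
+// individual elements, which is what lets routines such as shrink_to_fit() above
+// relocate each element array by moving rather than copying.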
+template +class move_iterator, Ts...>> +{ +public: + typedef TupleVecInternal::TupleVecIter, Ts...> iterator_type; + typedef iterator_traits traits_type; + typedef typename traits_type::iterator_category iterator_category; + typedef typename traits_type::value_type value_type; + typedef typename traits_type::difference_type difference_type; + typedef typename traits_type::pointer pointer; + typedef tuple reference; + typedef move_iterator this_type; + +protected: + iterator_type mIterator; + +public: + move_iterator() : mIterator() {} + explicit move_iterator(iterator_type mi) : mIterator(mi) {} + + template + move_iterator(const move_iterator& mi) : mIterator(mi.base()) {} + + iterator_type base() const { return mIterator; } + reference operator*() const { return eastl::move(MakeReference()); } + pointer operator->() const { return mIterator; } + + this_type& operator++() { ++mIterator; return *this; } + this_type operator++(int) { + this_type tempMoveIterator = *this; + ++mIterator; + return tempMoveIterator; + } + + this_type& operator--() { --mIterator; return *this; } + this_type operator--(int) + { + this_type tempMoveIterator = *this; + --mIterator; + return tempMoveIterator; + } + + this_type operator+(difference_type n) const { return move_iterator(mIterator + n); } + this_type& operator+=(difference_type n) + { + mIterator += n; + return *this; + } + + this_type operator-(difference_type n) const { return move_iterator(mIterator - n); } + this_type& operator-=(difference_type n) + { + mIterator -= n; + return *this; + } + + difference_type operator-(const this_type& rhs) const { return mIterator - rhs.mIterator; } + bool operator<(const this_type& rhs) const { return mIterator < rhs.mIterator; } + bool operator>(const this_type& rhs) const { return mIterator > rhs.mIterator; } + bool operator>=(const this_type& rhs) const { return mIterator >= rhs.mIterator; } + bool operator<=(const this_type& rhs) const { return mIterator <= rhs.mIterator; } + + reference operator[](difference_type n) const { return *(*this + n); } + +private: + reference MakeReference() const + { + return reference(eastl::move(((Ts*)mIterator.mpData[Indices])[mIterator.mIndex])...); + } + + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'is_iterator_wrapper': was declared deprecated + // Unwrapping interface, not part of the public API. + EASTL_REMOVE_AT_2024_SEPT iterator_type unwrap() const { return mIterator; } + + // The unwrapper helpers need access to unwrap(). 
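+	// (unwrap_iterator goes through these traits to call unwrap() and recover the
+	// underlying TupleVecIter when a wrapped iterator is handed back to the container.)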
+ friend is_iterator_wrapper_helper; + friend is_iterator_wrapper; + EASTL_INTERNAL_RESTORE_DEPRECATED() +}; + +template +inline bool operator==(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); +} + +template +inline bool operator!=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin())); +} + +template +inline bool operator<(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); +} + +template +inline bool operator>(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return b < a; +} + +template +inline bool operator<=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return !(b < a); +} + +template +inline bool operator>=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return !(a < b); +} + +template +inline void swap(TupleVecInternal::TupleVecImpl& a, + TupleVecInternal::TupleVecImpl& b) +{ + a.swap(b); +} + +// A customization of swap is made for r-values of tuples-of-references - +// normally, swapping rvalues doesn't make sense, but in this case, we do want to +// swap the contents of what the tuple-of-references are referring to +// +// This is required due to TupleVecIter returning a value-type for its dereferencing, +// as opposed to an actual real reference of some sort +template +inline +typename enable_if...>::value>::type +swap(tuple&& a, tuple&& b) +{ + a.swap(b); +} + +template +inline +typename enable_if...>::value>::type +swap(tuple&& a, tuple&& b) = delete; + + +// External interface of tuple_vector +template +class tuple_vector : public TupleVecInternal::TupleVecImpl, Ts...> +{ + typedef tuple_vector this_type; + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + using base_type::base_type; + +public: + this_type& operator=(std::initializer_list iList) + { + base_type::operator=(iList); + return *this; + } +}; + +// Variant of tuple_vector that allows a user-defined allocator type (can't mix default template params with variadics) +template +class tuple_vector_alloc + : public TupleVecInternal::TupleVecImpl, Ts...> +{ + typedef tuple_vector_alloc this_type; + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + using base_type::base_type; + +public: + + this_type& operator=(std::initializer_list iList) + { + base_type::operator=(iList); + return *this; + } +}; + +} // namespace eastl + +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() + +#endif // EASTL_TUPLEVECTOR_H diff --git a/external/EASTL/include/EASTL/chrono.h b/external/EASTL/include/EASTL/chrono.h new file mode 100644 index 00000000..024e8098 --- /dev/null +++ b/external/EASTL/include/EASTL/chrono.h @@ -0,0 +1,775 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the eastl::chrono specification which is part of the +// standard STL date and time library. 
eastl::chrono implements all the +// mechanisms required to capture and manipulate times retrieved from the +// provided clocks. It implements the all of the features to allow type safe +// durations to be used in code. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_CHRONO_H +#define EASTL_CHRONO_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include + + +// TODO: move to platform specific cpp or header file +#if defined EA_PLATFORM_MICROSOFT + EA_DISABLE_ALL_VC_WARNINGS() + + #ifndef WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN + #endif + + #undef NOMINMAX + #define NOMINMAX + + #include + + #ifdef min + #undef min + #endif + #ifdef max + #undef max + #endif + + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) + // Nothing to do +#elif defined(EA_PLATFORM_SONY) + #include + #include +#elif defined(EA_PLATFORM_APPLE) + #include +#elif defined(EA_PLATFORM_POSIX) || defined(EA_PLATFORM_MINGW) || defined(EA_PLATFORM_ANDROID) + // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). + #if defined(EA_PLATFORM_MINGW) + #include + #endif + #include + #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)) + #include + #else + #include + #include + #endif +#endif + + +namespace eastl +{ +namespace chrono +{ + /////////////////////////////////////////////////////////////////////////////// + // treat_as_floating_point + /////////////////////////////////////////////////////////////////////////////// + template + struct treat_as_floating_point : is_floating_point {}; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.4, duration_values + /////////////////////////////////////////////////////////////////////////////// + template + struct duration_values + { + public: + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep zero() { return Rep(0); } + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep max() { return eastl::numeric_limits::max(); } + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep min() { return eastl::numeric_limits::lowest(); } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // duration fwd_decl + /////////////////////////////////////////////////////////////////////////////// + template > + class duration; + + + namespace Internal + { + /////////////////////////////////////////////////////////////////////////////// + // IsRatio + /////////////////////////////////////////////////////////////////////////////// + template struct IsRatio : eastl::false_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + + + /////////////////////////////////////////////////////////////////////////////// + // IsDuration + /////////////////////////////////////////////////////////////////////////////// + template struct IsDuration : eastl::false_type{}; + template struct IsDuration> : eastl::true_type{}; + template struct IsDuration> : eastl::true_type{}; + template struct IsDuration> : eastl::true_type{}; + template struct IsDuration> : eastl::true_type{}; + + + /////////////////////////////////////////////////////////////////////////////// + // RatioGCD + /////////////////////////////////////////////////////////////////////////////// + template + struct RatioGCD + { + 
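+			// Yields the "greatest common period": the gcd of the two numerators over the
+			// lcm of the two denominators, i.e. the largest ratio that divides both Period1
+			// and Period2 exactly. The common_type specializations for durations and
+			// time_points below are built on this.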
static_assert(IsRatio::value, "Period1 is not a eastl::ratio type"); + static_assert(IsRatio::value, "Period2 is not a eastl::ratio type"); + + typedef ratio::value, + eastl::Internal::lcm::value> type; + }; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.7, duration_cast + /////////////////////////////////////////////////////////////////////////////// + namespace Internal + { + template ::type, + typename CommonRep = typename eastl::decay::type>::type, + bool = CommonPeriod::num == 1, + bool = CommonPeriod::den == 1> + struct DurationCastImpl; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& fd) + { + return ToDuration(static_cast(fd.count())); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) * + static_cast(CommonPeriod::num))); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) / + static_cast(CommonPeriod::den))); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) * + static_cast(CommonPeriod::num) / + static_cast(CommonPeriod::den))); + } + }; + }; // namespace Internal + + + /////////////////////////////////////////////////////////////////////////////// + // duration_cast + /////////////////////////////////////////////////////////////////////////////// + template + inline typename eastl::enable_if::value, ToDuration>::type + duration_cast(const duration& d) + { + typedef typename duration::this_type FromDuration; + return Internal::DurationCastImpl::DoCast(d); + } + + + /////////////////////////////////////////////////////////////////////////////// + // duration + /////////////////////////////////////////////////////////////////////////////// + template + class duration + { + Rep mRep; + + public: + typedef Rep rep; + typedef Period period; + typedef duration this_type; + + #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) + EA_CONSTEXPR duration() + : mRep() {} + + duration(const duration& other) + : mRep(Rep(other.mRep)) {} + + duration& operator=(const duration& other) + { mRep = other.mRep; return *this; } + #else + EA_CONSTEXPR duration() = default; + duration(const duration&) = default; + duration& operator=(const duration&) = default; + #endif + + + /////////////////////////////////////////////////////////////////////////////// + // conversion constructors + /////////////////////////////////////////////////////////////////////////////// + template + inline EA_CONSTEXPR explicit duration( + const Rep2& rep2, + typename eastl::enable_if::value && + (treat_as_floating_point::value || + !treat_as_floating_point::value)>::type** = 0) + : mRep(static_cast(rep2)) {} + + + template + EA_CONSTEXPR duration(const duration& d2, + typename eastl::enable_if::value || + (eastl::ratio_divide::type::den == 1 && + !treat_as_floating_point::value), + void>::type** = 0) + : mRep(duration_cast(d2).count()) {} + + /////////////////////////////////////////////////////////////////////////////// + // returns the count of ticks + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR Rep count() const { return mRep; } + + /////////////////////////////////////////////////////////////////////////////// + // static 
accessors of special duration values + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR inline static duration zero() { return duration(duration_values::zero()); } + EA_CONSTEXPR inline static duration min() { return duration(duration_values::min()); } + EA_CONSTEXPR inline static duration max() { return duration(duration_values::max()); } + + /////////////////////////////////////////////////////////////////////////////// + // const arithmetic operations + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR inline duration operator+() const { return *this; } + EA_CONSTEXPR inline duration operator-() const { return duration(0-mRep); } + + /////////////////////////////////////////////////////////////////////////////// + // arithmetic operations + /////////////////////////////////////////////////////////////////////////////// + inline duration operator++(int) { return duration(mRep++); } + inline duration operator--(int) { return duration(mRep--); } + inline duration& operator++() { ++mRep; return *this; } + inline duration& operator--() { --mRep; return *this; } + inline duration& operator+=(const duration& d) { mRep += d.count(); return *this; } + inline duration& operator-=(const duration& d) { mRep -= d.count(); return *this; } + inline duration& operator*=(const Rep& rhs) { mRep *= rhs; return *this; } + inline duration& operator/=(const Rep& rhs) { mRep /= rhs; return *this; } + inline duration& operator%=(const Rep& rhs) { mRep %= rhs; return *this; } + inline duration& operator%=(const duration& d) { mRep %= d.count(); return *this; } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.5, arithmetic operations with durations as arguments + /////////////////////////////////////////////////////////////////////////////// + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator+(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() + common_duration_t(rhs).count()); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator-(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() - common_duration_t(rhs).count()); + } + + template + duration::type, Period1> EASTL_FORCE_INLINE + operator*(const duration& lhs, const Rep2& rhs) + { + typedef duration::type, Period1> common_duration_t; + return common_duration_t(common_duration_t(lhs).count() * rhs); + } + + template + duration::type, Period2> EASTL_FORCE_INLINE + operator*(const Rep1& lhs, const duration& rhs) + { + typedef duration::type, Period2> common_duration_t; + return common_duration_t(lhs * common_duration_t(rhs).count()); + } + + template + duration::type, Period1> EASTL_FORCE_INLINE + operator/(const duration& lhs, const Rep2& rhs) + { + typedef duration::type, Period1> common_duration_t; + return common_duration_t(common_duration_t(lhs).count() / rhs); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator/(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() / common_duration_t(rhs).count()); + } + + template + 
duration::type, Period1> EASTL_FORCE_INLINE + operator%(const duration& lhs, const Rep2& rhs) + { + typedef duration::type, Period1> common_duration_t; + return common_duration_t(common_duration_t(lhs).count() % rhs); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator%(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() % common_duration_t(rhs).count()); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.6, compares two durations + /////////////////////////////////////////////////////////////////////////////// + template + EASTL_FORCE_INLINE bool operator==(const duration& lhs, + const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(lhs).count() == common_duration_t(rhs).count(); + } + + template + EASTL_FORCE_INLINE bool operator<(const duration& lhs, + const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(lhs).count() < common_duration_t(rhs).count(); + } + + template + EASTL_FORCE_INLINE bool operator!=(const duration& lhs, + const duration& rhs) + { + return !(lhs == rhs); + } + + template + EASTL_FORCE_INLINE bool operator<=(const duration& lhs, + const duration& rhs) + { + return !(rhs < lhs); + } + + template + EASTL_FORCE_INLINE bool operator>(const duration& lhs, + const duration& rhs) + { + return rhs < lhs; + } + + template + EASTL_FORCE_INLINE bool operator>=(const duration& lhs, + const duration& rhs) + { + return !(lhs < rhs); + } + + + /////////////////////////////////////////////////////////////////////////////// + // standard duration units + /////////////////////////////////////////////////////////////////////////////// + typedef duration nanoseconds; + typedef duration microseconds; + typedef duration milliseconds; + typedef duration seconds; + typedef duration> minutes; + typedef duration> hours; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6, time_point + /////////////////////////////////////////////////////////////////////////////// + template + class time_point + { + Duration mDuration; + + public: + typedef Clock clock; + typedef Duration duration; + typedef typename Duration::rep rep; + typedef typename Duration::period period; + + inline EA_CONSTEXPR time_point() : mDuration(Duration::zero()) {} + EA_CONSTEXPR explicit time_point(const Duration& other) : mDuration(other) {} + + template + inline EA_CONSTEXPR time_point( + const time_point& t, + typename eastl::enable_if::value>::type** = 0) + : mDuration(t.time_since_epoch()) {} + + EA_CONSTEXPR Duration time_since_epoch() const { return mDuration; } + + time_point& operator+=(const Duration& d) { mDuration += d; return *this; } + time_point& operator-=(const Duration& d) { mDuration -= d; return *this; } + + static EA_CONSTEXPR time_point min() { return time_point(Duration::min()); } + static EA_CONSTEXPR time_point max() { return time_point(Duration::max()); } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6.5, time_point arithmetic + /////////////////////////////////////////////////////////////////////////////// + template + inline EA_CONSTEXPR time_point>::type> + operator+(const time_point& lhs, const duration& rhs) + { + typedef 
time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs.time_since_epoch() + rhs); + } + + template + inline EA_CONSTEXPR time_point>::type> + operator+(const duration& lhs, const time_point& rhs) + { + typedef time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs + rhs.time_since_epoch()); + } + + template + inline EA_CONSTEXPR time_point>::type> + operator-(const time_point& lhs, const duration& rhs) + { + typedef time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs.time_since_epoch() - rhs); + } + + template + inline EA_CONSTEXPR typename eastl::common_type::type operator-( + const time_point& lhs, + const time_point& rhs) + { + return lhs.time_since_epoch() - rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator==(const time_point& lhs, + const time_point& rhs) + { + return lhs.time_since_epoch() == rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator!=(const time_point& lhs, + const time_point& rhs) + { + return !(lhs == rhs); + } + + template + inline EA_CONSTEXPR bool operator<(const time_point& lhs, const time_point& rhs) + { + return lhs.time_since_epoch() < rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator<=(const time_point& lhs, + const time_point& rhs) + { + return !(rhs < lhs); + } + + template + inline EA_CONSTEXPR bool operator>(const time_point& lhs, const time_point& rhs) + { + return rhs < lhs; + } + + template + inline EA_CONSTEXPR bool operator>=(const time_point& lhs, + const time_point& rhs) + { + return !(lhs < rhs); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6.7, time_point_cast + /////////////////////////////////////////////////////////////////////////////// + template + EA_CONSTEXPR time_point time_point_cast( + const time_point& t, + typename eastl::enable_if::value>::type** = 0) + { + return time_point(duration_cast(t.time_since_epoch())); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.7, clocks + /////////////////////////////////////////////////////////////////////////////// + + namespace Internal + { + #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) + #define EASTL_NS_PER_TICK 1 + #elif defined EA_PLATFORM_SONY + #define EASTL_NS_PER_TICK 1 + #elif defined EA_PLATFORM_POSIX + #define EASTL_NS_PER_TICK _XTIME_NSECS_PER_TICK + #else + #define EASTL_NS_PER_TICK 100 + #endif + + #if defined(EA_PLATFORM_POSIX) + typedef chrono::nanoseconds::period SystemClock_Period; + typedef chrono::nanoseconds::period SteadyClock_Period; + #else + typedef eastl::ratio_multiply, nano>::type SystemClock_Period; + typedef eastl::ratio_multiply, nano>::type SteadyClock_Period; + #endif + + + /////////////////////////////////////////////////////////////////////////////// + // Internal::GetTicks + /////////////////////////////////////////////////////////////////////////////// + inline uint64_t GetTicks() + { + #if defined EA_PLATFORM_MICROSOFT + auto queryFrequency = [] + { + LARGE_INTEGER frequency; + QueryPerformanceFrequency(&frequency); + return double(1000000000.0L / (long double)frequency.QuadPart); // nanoseconds per tick + }; + + auto queryCounter = [] + { + LARGE_INTEGER counter; + QueryPerformanceCounter(&counter); + return counter.QuadPart; + }; + + EA_DISABLE_VC_WARNING(4640) // warning C4640: construction of local static object is not thread-safe (VS2013) + static auto frequency = queryFrequency(); // cache cpu frequency 
on first call + EA_RESTORE_VC_WARNING() + return uint64_t(frequency * (double)queryCounter()); + #elif defined EA_PLATFORM_SONY + auto queryFrequency = [] + { + // nanoseconds/seconds / ticks/seconds + return double(1000000000.0L / (long double)sceKernelGetProcessTimeCounterFrequency()); // nanoseconds per tick + }; + + auto queryCounter = [] + { + return sceKernelGetProcessTimeCounter(); + }; + + EA_DISABLE_VC_WARNING(4640) // warning C4640: construction of local static object is not thread-safe (VS2013) + static auto frequency = queryFrequency(); // cache cpu frequency on first call + EA_RESTORE_VC_WARNING() + return uint64_t(frequency * (double)queryCounter()); + #elif defined(EA_PLATFORM_APPLE) + auto queryTimeInfo = [] + { + mach_timebase_info_data_t info; + mach_timebase_info(&info); + return info; + }; + + static auto timeInfo = queryTimeInfo(); + uint64_t t = mach_absolute_time(); + t *= timeInfo.numer; + t /= timeInfo.denom; + return t; + #elif defined(EA_PLATFORM_POSIX) // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). + #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)) + timespec ts; + int result = clock_gettime(CLOCK_MONOTONIC, &ts); + + if (result == -1 && errno == EINVAL) + result = clock_gettime(CLOCK_REALTIME, &ts); + + const uint64_t nNanoseconds = (uint64_t)ts.tv_nsec + ((uint64_t)ts.tv_sec * UINT64_C(1000000000)); + return nNanoseconds; + #else + struct timeval tv; + gettimeofday(&tv, NULL); + const uint64_t nMicroseconds = (uint64_t)tv.tv_usec + ((uint64_t)tv.tv_sec * 1000000); + return nMicroseconds; + #endif + #else + #error "chrono not implemented for platform" + #endif + } + } // namespace Internal + + + /////////////////////////////////////////////////////////////////////////////// + // system_clock + /////////////////////////////////////////////////////////////////////////////// + class system_clock + { + public: + typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration + typedef Internal::SystemClock_Period period; + typedef chrono::duration duration; // duration, capable of representing negative durations + typedef chrono::time_point time_point; + + // true if the time between ticks is always increases monotonically + EA_CONSTEXPR_OR_CONST static bool is_steady = false; + + // returns a time point representing the current point in time. + static time_point now() EA_NOEXCEPT + { + return time_point(duration(Internal::GetTicks())); + } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // steady_clock + /////////////////////////////////////////////////////////////////////////////// + class steady_clock + { + public: + typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration + typedef Internal::SteadyClock_Period period; + typedef chrono::duration duration; // duration, capable of representing negative durations + typedef chrono::time_point time_point; + + // true if the time between ticks is always increases monotonically + EA_CONSTEXPR_OR_CONST static bool is_steady = true; + + // returns a time point representing the current point in time. 
+ static time_point now() EA_NOEXCEPT + { + return time_point(duration(Internal::GetTicks())); + } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // high_resolution_clock + /////////////////////////////////////////////////////////////////////////////// + typedef system_clock high_resolution_clock; + + +} // namespace chrono + + + /////////////////////////////////////////////////////////////////////////////// + // duration common_type specialization + /////////////////////////////////////////////////////////////////////////////// + template + struct common_type, chrono::duration> + { + typedef chrono::duration::type>::type, + typename chrono::Internal::RatioGCD::type> type; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // time_point common_type specialization + /////////////////////////////////////////////////////////////////////////////// + template + struct common_type, chrono::time_point> + { + typedef chrono::time_point::type> type; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // chrono_literals + /////////////////////////////////////////////////////////////////////////////// + #if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED + // Disabling the Clang/GCC/MSVC warning about using user + // defined literals without a leading '_' as they are reserved + // for standard libary usage. + EA_DISABLE_VC_WARNING(4455) + EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals) + EA_DISABLE_GCC_WARNING(-Wliteral-suffix) + inline namespace literals + { + inline namespace chrono_literals + { + /////////////////////////////////////////////////////////////////////////////// + // integer chrono literals + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR chrono::hours operator"" h(unsigned long long h) { return chrono::hours(h); } + EA_CONSTEXPR chrono::minutes operator"" min(unsigned long long m) { return chrono::minutes(m); } + EA_CONSTEXPR chrono::seconds operator"" s(unsigned long long s) { return chrono::seconds(s); } + EA_CONSTEXPR chrono::milliseconds operator"" ms(unsigned long long ms) { return chrono::milliseconds(ms); } + EA_CONSTEXPR chrono::microseconds operator"" us(unsigned long long us) { return chrono::microseconds(us); } + EA_CONSTEXPR chrono::nanoseconds operator"" ns(unsigned long long ns) { return chrono::nanoseconds(ns); } + + /////////////////////////////////////////////////////////////////////////////// + // float chrono literals + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR chrono::duration> operator"" h(long double h) + { return chrono::duration>(h); } + EA_CONSTEXPR chrono::duration> operator"" min(long double m) + { return chrono::duration>(m); } + EA_CONSTEXPR chrono::duration operator"" s(long double s) + { return chrono::duration(s); } + EA_CONSTEXPR chrono::duration operator"" ms(long double ms) + { return chrono::duration(ms); } + EA_CONSTEXPR chrono::duration operator"" us(long double us) + { return chrono::duration(us); } + EA_CONSTEXPR chrono::duration operator"" ns(long double ns) + { return chrono::duration(ns); } + + } // namespace chrono_literals + }// namespace literals + EA_RESTORE_GCC_WARNING() // -Wliteral-suffix + EA_RESTORE_CLANG_WARNING() // -Wuser-defined-literals + EA_RESTORE_VC_WARNING() // warning: 4455 + #endif + +} // namespace eastl + + +#if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED 
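+// Illustrative usage sketch (hypothetical user code, not part of the original
+// header). With EASTL_USER_LITERALS_ENABLED, the literals defined above behave
+// like their std::chrono counterparts:
+//
+//     using namespace eastl::literals::chrono_literals;
+//
+//     eastl::chrono::milliseconds frameBudget = 16ms;  // integral literal
+//     auto halfSecond = 0.5s;                          // floating-point literal
+//     auto deadline = eastl::chrono::steady_clock::now() + frameBudget;
+//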
+namespace chrono +{ + using namespace eastl::literals::chrono_literals; +} // namespace chrono +#endif + + +#endif diff --git a/external/EASTL/include/EASTL/compare.h b/external/EASTL/include/EASTL/compare.h new file mode 100644 index 00000000..9bc3bd69 --- /dev/null +++ b/external/EASTL/include/EASTL/compare.h @@ -0,0 +1,45 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_COMPARE_H +#define EASTL_COMPARE_H + + +#include + +namespace eastl +{ + +#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + struct synth_three_way + { + template + constexpr auto operator()(const T& t, const U& u) const requires requires + { + {t < u}->std::convertible_to; + {u < t}->std::convertible_to; + } + { + if constexpr (std::three_way_comparable_with) + { + return t <=> u; + } + else + { + return (t < u) ? std::weak_ordering::less : + (u < t) ? std::weak_ordering::greater : + std::weak_ordering::equivalent; + } + } + }; + + template + using synth_three_way_result = decltype(synth_three_way{}(declval(), declval())); +#endif + +} // namespace eastl + + +#endif // Header include guard \ No newline at end of file diff --git a/external/EASTL/include/EASTL/core_allocator.h b/external/EASTL/include/EASTL/core_allocator.h new file mode 100644 index 00000000..e4374912 --- /dev/null +++ b/external/EASTL/include/EASTL/core_allocator.h @@ -0,0 +1,70 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_CORE_ALLOCATOR_H +#define EASTL_CORE_ALLOCATOR_H + +#if EASTL_CORE_ALLOCATOR_ENABLED + +#include + +namespace EA +{ + namespace Allocator + { + /// EASTLCoreAllocatorImpl + /// + /// EASTL provides an out of the box implementation of the + /// ICoreAllocator interface. This is provided as a convenience for + /// users who wish to provide ICoreAllocator implementations for EASTL to use. + /// + /// EASTL has a dependency on coreallocator so to provide an out of + /// the box implementation for EASTLCoreAlloctor and EASTLCoreDeleter + /// that can be used and tested. Historically we could not test + /// ICoreAllocator interface because we relied on the code being linked + /// in user code. 
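+		///
+		/// Illustrative usage sketch (hypothetical user code, not from the
+		/// original header; Widget is a placeholder type):
+		///     EA::Allocator::ICoreAllocator* pAllocator =
+		///         EA::Allocator::EASTLCoreAllocatorImpl::GetDefaultAllocator();
+		///     void* pMemory = pAllocator->Alloc(sizeof(Widget), "Widget", 0);
+		///     pAllocator->Free(pMemory);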
+ /// + + class EASTLCoreAllocatorImpl : public ICoreAllocator + { + public: + virtual void* Alloc(size_t size, const char* name, unsigned int flags) + { + return ::operator new[](size, name, flags, 0, __FILE__, __LINE__); + } + + virtual void* Alloc(size_t size, const char* name, unsigned int flags, unsigned int alignment, unsigned int alignOffset = 0) + { + return ::operator new[](size, alignment, alignOffset, name, flags, 0, __FILE__, __LINE__); + } + + virtual void Free(void* ptr, size_t size = 0) + { + ::operator delete(static_cast(ptr)); + } + + virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags) + { + return Alloc(size, debugParams.mName, flags); + } + + virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags, unsigned int align, unsigned int alignOffset = 0) + { + return Alloc(size, debugParams.mName, flags, align, alignOffset); + } + + static EASTLCoreAllocatorImpl* GetDefaultAllocator(); + }; + + inline EASTLCoreAllocatorImpl* EASTLCoreAllocatorImpl::GetDefaultAllocator() + { + static EASTLCoreAllocatorImpl allocator; + return &allocator; + } + } +} + +#endif // EASTL_CORE_ALLOCATOR_ENABLED +#endif // EASTL_CORE_ALLOCATOR_H + diff --git a/external/EASTL/include/EASTL/core_allocator_adapter.h b/external/EASTL/include/EASTL/core_allocator_adapter.h new file mode 100644 index 00000000..d6f18275 --- /dev/null +++ b/external/EASTL/include/EASTL/core_allocator_adapter.h @@ -0,0 +1,368 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Implements an EASTL allocator that uses an ICoreAllocator. +// However, this header file is not dependent on ICoreAllocator or its package. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_H +#define EASTL_CORE_ALLOCATOR_ADAPTER_H + +#if EASTL_CORE_ALLOCATOR_ENABLED + + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +/// EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR +/// +/// This allows the application to override the default name for the default global core allocator. +/// However, you must be careful in your usage of this, as if this file is shared between uses then +/// you will need to be careful that your override of this doesn't conflict with others. +/// +#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR + #define EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR AllocatorType::GetDefaultAllocator +#endif + + + +namespace EA +{ + namespace Allocator + { + /// CoreAllocatorAdapter + /// + /// Implements the EASTL allocator interface. + /// Allocates memory from an instance of ICoreAllocator or another class with an equivalent interface. + /// ICoreAllocator is a pure-virtual memory allocation interface used by a number of EA games and + /// shared libraries. It's completely unrelated to EASTL, but it's prevalent enough that it's useful + /// for EASTL to have a built-in adapter for this interface. 
ICoreAllocator is declared in the + /// CoreAllocator package icoreallocator_interface.h header, but CoreAllocatorAdapter can work with + /// any equivalent interface, as defined below. + /// + /// Expected interface: + /// enum AllocFlags { + /// kFlagTempMemory = 0, + /// kFlagPermMemory = 1 + /// }; + /// + /// struct CoreAllocator { + /// void* Alloc(size_t size, const char* name, unsigned int allocFlags); + /// void* Alloc(size_t size, const char* name, unsigned int allocFlags, // Not required unless you are working with types that require custom alignment. + /// unsigned int align, unsigned int alignOffset = 0); + /// void Free(void* block, size_t size = 0); + /// static CoreAllocator* GetDefaultAllocator(); + /// }; + /// + /// Example usage: + /// #include + /// typedef EA::Allocator::CoreAllocatorAdapter Adapter; + /// eastl::list widgetList(Adapter("UI/WidgetList", pSomeCoreAllocator)); + /// widgetList.push_back(Widget()); + /// + /// Example usage: + /// #include + /// eastl::list > widgetList; + /// widgetList.push_back(Widget()); + /// + /// Example usage: + /// #include + /// typedef EA::Allocator::CoreAllocatorAdapter Adapter; + /// typedef eastl::list WidgetList; + /// CoreAllocatorFixed widgetCoreAllocator(pFixedAllocatorForWidgetListValueType); // CoreAllocatorFixed is a hypothetical implementation of the ICoreAllocator interface. + /// WidgetList widgetList(Adapter("UI/WidgetList", &widgetCoreAllocator)); // Note that the widgetCoreAllocator is declared before and thus destroyed after the widget list. + /// + template + class CoreAllocatorAdapter + { + public: + typedef CoreAllocatorAdapter this_type; + + public: + // To do: Make this constructor explicit, when there is no known code dependent on it being otherwise. + CoreAllocatorAdapter(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME), AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR()); + CoreAllocatorAdapter(const char* pName, AllocatorType* pAllocator, int flags); + CoreAllocatorAdapter(const CoreAllocatorAdapter& x); + CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* pName); + + CoreAllocatorAdapter& operator=(const CoreAllocatorAdapter& x); + + void* allocate(size_t n, int flags = 0); + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); + void deallocate(void* p, size_t n); + + AllocatorType* get_allocator() const; + void set_allocator(AllocatorType* pAllocator); + + int get_flags() const; + void set_flags(int flags); + + const char* get_name() const; + void set_name(const char* pName); + + public: // Public because otherwise VC++ generates (possibly invalid) warnings about inline friend template specializations. + AllocatorType* mpCoreAllocator; + int mnFlags; // Allocation flags. See ICoreAllocator/AllocFlags. + + #if EASTL_NAME_ENABLED + const char* mpName; // Debug name, used to track memory. 
+ #endif + }; + + template + bool operator==(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b); + + template + bool operator!=(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b); + + + + /// EASTLICoreAllocator + /// + /// Provides a standardized typedef for ICoreAllocator; + /// + /// Example usage: + /// eastl::list widgetList("UI/WidgetList", pSomeCoreAllocator); + /// widgetList.push_back(Widget()); + /// + class ICoreAllocator; + class EASTLCoreAllocatorImpl; + + typedef CoreAllocatorAdapter EASTLICoreAllocatorAdapter; + typedef CoreAllocatorAdapter EASTLCoreAllocatorAdapter; + typedef EASTLICoreAllocatorAdapter EASTLICoreAllocator; // for backwards compatibility + + + + /// EASTLICoreDeleter + /// + /// Implements a functor which can free memory from the specified + /// ICoreAllocator interface. This is a convenience object provided for + /// users who wish to have EASTL containers deallocate memory obtained from + /// ICoreAllocator interfaces. + /// + template + class CoreDeleterAdapter + { + public: + typedef CoreDeleterAdapter this_type; + AllocatorType* mpCoreAllocator; + + public: + CoreDeleterAdapter(AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR()) EA_NOEXCEPT + : mpCoreAllocator(pAllocator) {} + + ~CoreDeleterAdapter() EA_NOEXCEPT {} + + template + void operator()(T* p) + { + p->~T(); + mpCoreAllocator->Free(p); + } + + CoreDeleterAdapter(const CoreDeleterAdapter& in) { mpCoreAllocator = in.mpCoreAllocator; } + + CoreDeleterAdapter(CoreDeleterAdapter&& in) + { + mpCoreAllocator = in.mpCoreAllocator; + in.mpCoreAllocator = nullptr; + } + + CoreDeleterAdapter& operator=(const CoreDeleterAdapter& in) + { + mpCoreAllocator = in.mpCoreAllocator; + return *this; + } + + CoreDeleterAdapter& operator=(CoreDeleterAdapter&& in) + { + mpCoreAllocator = in.mpCoreAllocator; + in.mpCoreAllocator = nullptr; + return *this; + } + }; + + + + /// EASTLICoreDeleter + /// + /// Provides a standardized typedef for ICoreAllocator implementations. + /// + /// Example usage: + /// eastl::shared_ptr foo(pA, EASTLCoreDeleter()); + /// + typedef CoreDeleterAdapter EASTLICoreDeleterAdapter; + typedef CoreDeleterAdapter EASTLCoreDeleterAdapter; + + } // namespace Allocator + +} // namespace EA + + + + + +/////////////////////////////////////////////////////////////////////////////// +// Inlines +/////////////////////////////////////////////////////////////////////////////// + +namespace EA +{ + namespace Allocator + { + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator) + : mpCoreAllocator(pCoreAllocator), mnFlags(0) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator, int flags) + : mpCoreAllocator(pCoreAllocator), mnFlags(flags) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const CoreAllocatorAdapter& x) + : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags) + { + #if EASTL_NAME_ENABLED + mpName = x.mpName; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* EASTL_NAME(pName)) + : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags) + { + #if EASTL_NAME_ENABLED + mpName = pName ? 
pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter& CoreAllocatorAdapter::operator=(const CoreAllocatorAdapter& x) + { + mpCoreAllocator = x.mpCoreAllocator; + mnFlags = x.mnFlags; + + #if EASTL_NAME_ENABLED + mpName = x.mpName; + #endif + + return *this; + } + + template + inline void* CoreAllocatorAdapter::allocate(size_t n, int /*flags*/) + { + // It turns out that EASTL itself doesn't use the flags parameter, + // whereas the user here might well want to specify a flags + // parameter. So we use ours instead of the one passed in. + return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags); + } + + template + inline void* CoreAllocatorAdapter::allocate(size_t n, size_t alignment, size_t offset, int /*flags*/) + { + // It turns out that EASTL itself doesn't use the flags parameter, + // whereas the user here might well want to specify a flags + // parameter. So we use ours instead of the one passed in. + return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags, (unsigned)alignment, (unsigned)offset); + } + + template + inline void CoreAllocatorAdapter::deallocate(void* p, size_t n) + { + return mpCoreAllocator->Free(p, n); + } + + template + inline AllocatorType* CoreAllocatorAdapter::get_allocator() const + { + return mpCoreAllocator; + } + + template + inline void CoreAllocatorAdapter::set_allocator(AllocatorType* pAllocator) + { + mpCoreAllocator = pAllocator; + } + + template + inline int CoreAllocatorAdapter::get_flags() const + { + return mnFlags; + } + + template + inline void CoreAllocatorAdapter::set_flags(int flags) + { + mnFlags = flags; + } + + template + inline const char* CoreAllocatorAdapter::get_name() const + { + #if EASTL_NAME_ENABLED + return mpName; + #else + return EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline void CoreAllocatorAdapter::set_name(const char* pName) + { + #if EASTL_NAME_ENABLED + mpName = pName; + #else + (void)pName; + #endif + } + + + + template + inline bool operator==(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b) + { + return (a.mpCoreAllocator == b.mpCoreAllocator) && + (a.mnFlags == b.mnFlags); + } + + template + inline bool operator!=(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b) + { + return (a.mpCoreAllocator != b.mpCoreAllocator) || + (a.mnFlags != b.mnFlags); + } + + + } // namespace Allocator + +} // namespace EA + + +#endif // EASTL_CORE_ALLOCATOR_ENABLED +#endif // Header include guard + + + + + + + + diff --git a/external/EASTL/include/EASTL/deque.h b/external/EASTL/include/EASTL/deque.h new file mode 100644 index 00000000..fe361532 --- /dev/null +++ b/external/EASTL/include/EASTL/deque.h @@ -0,0 +1,2911 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////////////// +// deque design +// +// A deque (pronounced "deck") is a double-ended queue, though this is partially +// of a misnomer. A deque does indeed let you add and remove values from both ends +// of the container, but it's not usually used for such a thing and instead is used +// as a more flexible version of a vector. It provides operator[] (random access) +// and can insert items anywhere and not just at the front and back. 
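+//
+// As an illustrative sketch (hypothetical user code), the "flexible vector"
+// usage referred to above looks like:
+//
+//     eastl::deque<int> d;
+//     d.push_back(2);     // append at the back
+//     d.push_front(1);    // prepend at the front; no existing elements are moved
+//     int x = d[1];       // random access, as with a vector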
+//
+// While you can implement a double-ended queue via a doubly-linked list, deque is
+// instead implemented as a list of arrays. The benefit of this is that memory usage
+// is lower and that random access can be had with decent efficiency.
+//
+// Our implementation of deque is just like every other implementation of deque,
+// as the C++ standard all but dictates that you make it work this way. Below
+// we have a depiction of an array (or vector) of 48 items, with each node being
+// a '+' character and extra capacity being a '-' character. What we have is one
+// contiguous block of memory:
+//
+//     ++++++++++++++++++++++++++++++++++++++++++++++++-----------------
+//     0                                               47
+//
+// With a deque, the same array of 48 items would be implemented as multiple smaller
+// arrays of contiguous memory, each of fixed size. We will call these "sub-arrays."
+// In the case here, we have six arrays of 8 nodes:
+//
+//     ++++++++ ++++++++ ++++++++ ++++++++ ++++++++ ++++++++
+//
+// With a vector, item [0] is the first item and item [47] is the last item. With a
+// deque, item [0] is usually not the first item and neither is item [47]. There is
+// extra capacity on both the front side and the back side of the deque. So a deque
+// (of 24 items) actually looks like this:
+//
+//     -------- -----+++ ++++++++ ++++++++ +++++--- --------
+//                   0                         23
+//
+// To insert items at the front, you move into the capacity on the left, and to insert
+// items at the back, you append items on the right. As you can see, inserting an item
+// at the front doesn't require allocating new memory nor does it require moving any
+// items in the container. It merely involves moving the pointer to the [0] item to
+// the left by one node.
+//
+// We keep track of these sub-arrays by having an array of pointers, with each array
+// entry pointing to each of the sub-arrays. We could alternatively use a linked
+// list of pointers, but it turns out we can implement our deque::operator[] more
+// efficiently if we use an array of pointers instead of a list of pointers.
+//
+// To implement deque::iterator, we could keep a struct which is essentially this:
+//     struct iterator {
+//         int subArrayIndex;
+//         int subArrayOffset;
+//     }
+//
+// In practice, we implement iterators a little differently, but in reality our
+// implementation isn't much different from the above. It turns out that it's
+// simplest if we also manage the location of item [0] and item [end] by using
+// these same iterators.
+//
+// To consider: Implement the deque as a circular deque instead of a linear one.
+//              This would use a similar subarray layout but iterators would
+//              wrap around when they reached the end of the subarray pointer list.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_DEQUE_H
+#define EASTL_DEQUE_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/initializer_list.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+#if EASTL_EXCEPTIONS_ENABLED
+	EA_DISABLE_ALL_VC_WARNINGS()
+	#include <stdexcept> // std::out_of_range, std::length_error.
+	EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+
+// 4267 - 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++.
+// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized +// 4480 - nonstandard extension used: specifying underlying type for enum +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +EA_DISABLE_VC_WARNING(4267 4345 4480 4530 4571); + +#if EASTL_EXCEPTIONS_ENABLED + // 4703 - potentially uninitialized local pointer variable used. VC++ is mistakenly analyzing the possibility of uninitialized variables, though it's not easy for it to do so. + // 4701 - potentially uninitialized local variable used. + EA_DISABLE_VC_WARNING(4703 4701) +#endif + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +namespace eastl +{ + + /// EASTL_DEQUE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_DEQUE_DEFAULT_NAME + #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque" // Unless the user overrides something, this is "EASTL deque". + #endif + + + /// EASTL_DEQUE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_DEQUE_DEFAULT_ALLOCATOR + #define EASTL_DEQUE_DEFAULT_ALLOCATOR allocator_type(EASTL_DEQUE_DEFAULT_NAME) + #endif + + + /// DEQUE_DEFAULT_SUBARRAY_SIZE + /// + /// Defines the default number of items in a subarray. + /// Note that the user has the option of specifying the subarray size + /// in the deque template declaration. + /// + #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the declaration below. + #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) ((sizeof(T) <= 4) ? 64 : ((sizeof(T) <= 8) ? 32 : ((sizeof(T) <= 16) ? 16 : ((sizeof(T) <= 32) ? 8 : 4)))) + #else + #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) 16 + #endif + + + + /// DequeIterator + /// + /// The DequeIterator provides both const and non-const iterators for deque. + /// It also is used for the tracking of the begin and end for the deque. 
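+	///
+	/// As an illustrative summary (the members are declared further below), each
+	/// iterator carries enough state to step across subarray boundaries in O(1):
+	///
+	///     T*  mpCurrent;         // the element currently referenced
+	///     T*  mpBegin;           // first slot of the current subarray
+	///     T*  mpEnd;             // one past the last slot of the current subarray
+	///     T** mpCurrentArrayPtr; // entry in the pointer array for this subarray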
+ /// + template + struct DequeIterator + { + typedef DequeIterator this_type; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef T value_type; + typedef T* pointer; + typedef T& reference; + + public: + DequeIterator(); + DequeIterator(const iterator& x); + DequeIterator& operator=(const iterator& x); + + pointer operator->() const; + reference operator*() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + this_type& operator+=(difference_type n); + this_type& operator-=(difference_type n); + + this_type operator+(difference_type n) const; + this_type operator-(difference_type n) const; + + protected: + template + friend struct DequeIterator; + + template + friend struct DequeBase; + + template + friend class deque; + + template + friend bool operator==(const DequeIterator&, + const DequeIterator&); + + template + friend bool operator!=(const DequeIterator&, + const DequeIterator&); + + template + friend bool operator!=(const DequeIterator& a, + const DequeIterator& b); + + template + friend bool operator< (const DequeIterator&, + const DequeIterator&); + + template + friend bool operator> (const DequeIterator&, + const DequeIterator&); + + template + friend bool operator<=(const DequeIterator&, + const DequeIterator&); + + template + friend bool operator>=(const DequeIterator&, + const DequeIterator&); + + template + friend typename DequeIterator::difference_type + operator-(const DequeIterator& a, + const DequeIterator& b); + + protected: + T* mpCurrent; // Where we currently point. Declared first because it's used most often. + T* mpBegin; // The beginning of the current subarray. + T* mpEnd; // The end of the current subarray. To consider: remove this member, as it is always equal to 'mpBegin + kDequeSubarraySize'. Given that deque subarrays usually consist of hundreds of bytes, this isn't a massive win. Also, now that we are implementing a zero-allocation new deque policy, mpEnd may in fact not be equal to 'mpBegin + kDequeSubarraySize'. + T** mpCurrentArrayPtr; // Pointer to current subarray. We could alternatively implement this as a list node iterator if the deque used a linked list. + + struct Increment {}; + struct Decrement {}; + struct FromConst {}; + + DequeIterator(T** pCurrentArrayPtr, T* pCurrent); + DequeIterator(const const_iterator& x, FromConst) : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr){} + DequeIterator(const iterator& x, Increment); + DequeIterator(const iterator& x, Decrement); + + this_type move(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait is_trivially_copyable, + this_type move(const iterator& first, const iterator& last, false_type); // false means it does not. + + void move_backward(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait is_trivially_copyable, + void move_backward(const iterator& first, const iterator& last, false_type); // false means it does not. + + void SetSubarray(T** pCurrentArrayPtr); + }; + + + + + /// DequeBase + /// + /// The DequeBase implements memory allocation for deque. + /// See VectorBase (class vector) for an explanation of why we + /// create this separate base class. 
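+	///
+	/// (This paraphrases the VectorBase rationale referenced above: keeping raw
+	/// memory management in a base class means that if a derived container's
+	/// constructor throws, the base destructor still runs and releases whatever
+	/// memory the base had already acquired.)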
+ /// + template + struct DequeBase + { + typedef T value_type; + typedef Allocator allocator_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + + static const size_type npos = (size_type)-1; /// 'npos' means non-valid position or simply non-position. + static const size_type kMaxSize = (size_type)-2; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues. + + enum + { + kMinPtrArraySize = 8, /// A new empty deque has a ptrArraySize of 0, but any allocated ptrArrays use this min size. + kSubarraySize = kDequeSubarraySize /// + //kNodeSize = kDequeSubarraySize * sizeof(T) /// Disabled because it prevents the ability to do this: struct X{ eastl::deque mDequeOfSelf; }; + }; + + protected: + enum Side /// Defines the side of the deque: front or back. + { + kSideFront, /// Identifies the front side of the deque. + kSideBack /// Identifies the back side of the deque. + }; + + T** mpPtrArray; // Array of pointers to subarrays. + size_type mnPtrArraySize; // Possibly we should store this as T** mpArrayEnd. + iterator mItBegin; // Where within the subarrays is our beginning. + iterator mItEnd; // Where within the subarrays is our end. + allocator_type mAllocator; // To do: Use base class optimization to make this go away. + + public: + DequeBase(const allocator_type& allocator); + DequeBase(size_type n); + DequeBase(size_type n, const allocator_type& allocator); + ~DequeBase(); + + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + protected: + T* DoAllocateSubarray(); + void DoFreeSubarray(T* p); + void DoFreeSubarrays(T** pBegin, T** pEnd); + + T** DoAllocatePtrArray(size_type n); + void DoFreePtrArray(T** p, size_t n); + + iterator DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide); + void DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide); + + void DoInit(size_type n); + + }; // DequeBase + + + + + /// deque + /// + /// Implements a conventional C++ double-ended queue. The implementation used here + /// is very much like any other deque implementations you may have seen, as it + /// follows the standard algorithm for deque design. + /// + /// Note: + /// As of this writing, deque does not support zero-allocation initial emptiness. + /// A newly created deque with zero elements will still allocate a subarray + /// pointer set. We are looking for efficient and clean ways to get around this, + /// but current efforts have resulted in less efficient and more fragile code. + /// The logic of this class doesn't lend itself to a clean implementation. + /// It turns out that deques are one of the least likely classes you'd want this + /// behaviour in, so until this functionality becomes very important to somebody, + /// we will leave it as-is. It can probably be solved by adding some extra code to + /// the Do* functions and adding good comments explaining the situation. 
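+	///
+	/// Example usage (an illustrative sketch, not from the original header):
+	///     eastl::deque<int> d;
+	///     d.push_back(10);               // append at the back
+	///     d.push_front(5);               // prepend at the front
+	///     d.insert(d.begin() + 1, 7);    // insert at an arbitrary position
+	///     EASTL_ASSERT(d[0] == 5 && d[1] == 7 && d[2] == 10);
+	///     d.pop_front();                 // d is now {7, 10}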
+ /// + template + class deque : public DequeBase + { + public: + typedef DequeBase base_type; + typedef deque this_type; + typedef T value_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef typename base_type::allocator_type allocator_type; + + using base_type::npos; + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + static_assert(!is_const::value, "deque::value_type must be non-const."); + static_assert(!is_volatile::value, "deque::value_type must be non-volatile."); +#endif + + protected: + using base_type::kSideFront; + using base_type::kSideBack; + using base_type::mpPtrArray; + using base_type::mnPtrArraySize; + using base_type::mItBegin; + using base_type::mItEnd; + using base_type::mAllocator; + using base_type::DoAllocateSubarray; + using base_type::DoFreeSubarray; + using base_type::DoFreeSubarrays; + using base_type::DoAllocatePtrArray; + using base_type::DoFreePtrArray; + using base_type::DoReallocSubarray; + using base_type::DoReallocPtrArray; + + public: + deque(); + explicit deque(const allocator_type& allocator); + explicit deque(size_type n, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + deque(size_type n, const value_type& value, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + deque(const this_type& x); + deque(this_type&& x); + deque(this_type&& x, const allocator_type& allocator); + deque(std::initializer_list ilist, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + + // note: this has pre-C++11 semantics: + // this constructor is equivalent to the constructor deque(static_cast(first), static_cast(last)) if InputIterator is an integral type. + template + deque(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg. + + ~deque(); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void assign(size_type n, const value_type& value); + void assign(std::initializer_list ilist); + + template // It turns out that the C++ std::deque specifies a two argument + void assign(InputIterator first, InputIterator last); // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. 
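+
+		// An illustrative sketch (hypothetical user code) of the dispatch described
+		// above: assign is routed on is_integral, so
+		//
+		//     eastl::deque<int> d;
+		//     d.assign(5, 3);   // fills d with five 3s; the two ints are not
+		//                       // mistaken for an iterator pair.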
+ + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + + void resize(size_type n, const value_type& value); + void resize(size_type n); + + void shrink_to_fit(); + void set_capacity(size_type n = base_type::npos); + + reference operator[](size_type n); + const_reference operator[](size_type n) const; + + reference at(size_type n); + const_reference at(size_type n) const; + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + void push_front(const value_type& value); + reference push_front(); + void push_front(value_type&& value); + + void push_back(const value_type& value); + reference push_back(); + void push_back(value_type&& value); + + void pop_front(); + void pop_back(); + + template + iterator emplace(const_iterator position, Args&&... args); + + template + reference emplace_front(Args&&... args); + + template + reference emplace_back(Args&&... args); + + iterator insert(const_iterator position, const value_type& value); + iterator insert(const_iterator position, value_type&& value); + iterator insert(const_iterator position, size_type n, const value_type& value); + iterator insert(const_iterator position, std::initializer_list ilist); + + // note: this has pre-C++11 semantics: + // this function is equivalent to insert(const_iterator position, static_cast(first), static_cast(last)) if InputIterator is an integral type. + // ie. same as insert(const_iterator position, size_type n, const value_type& value) + template + iterator insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(reverse_iterator position); + reverse_iterator erase(reverse_iterator first, reverse_iterator last); + + void clear(); + //void reset_lose_memory(); // Disabled until it can be implemented efficiently and cleanly. // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
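+
+		// An illustrative sketch (hypothetical user code) of the debug checks
+		// declared below:
+		//
+		//     eastl::deque<int> d = {1, 2, 3};
+		//     EASTL_ASSERT(d.validate());
+		//     const int flags = d.validate_iterator(d.begin() + 1);
+		//     EASTL_ASSERT((flags & eastl::isf_valid) && (flags & eastl::isf_can_dereference));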
+ + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + template + void DoInit(Integer n, Integer value, true_type); + + template + void DoInit(InputIterator first, InputIterator last, false_type); + + template + void DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag); + + template + void DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag); + + void DoFillInit(const value_type& value); + + template + void DoAssign(Integer n, Integer value, true_type); + + template + void DoAssign(InputIterator first, InputIterator last, false_type); + + void DoAssignValues(size_type n, const value_type& value); + + template + iterator DoInsert(const const_iterator& position, Integer n, Integer value, true_type); + + template + iterator DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type); + + template + iterator DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, EASTL_ITC_NS::input_iterator_tag); + + template + iterator DoInsertFromIterator(const_iterator position, const ForwardIterator& first, const ForwardIterator& last, EASTL_ITC_NS::forward_iterator_tag); + + iterator DoInsertValues(const_iterator position, size_type n, const value_type& value); + + void DoSwap(this_type& x); + }; // class deque + + + + + /////////////////////////////////////////////////////////////////////// + // DequeBase + /////////////////////////////////////////////////////////////////////// + + template + DequeBase::DequeBase(const allocator_type& allocator) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(allocator) + { + // It is assumed here that the deque subclass will init us when/as needed. + } + + + template + DequeBase::DequeBase(size_type n) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(EASTL_DEQUE_DEFAULT_NAME) + { + // It's important to note that DoInit creates space for elements and assigns + // mItBegin/mItEnd to point to them, but these elements are not constructed. + // You need to immediately follow this constructor with code that constructs the values. + DoInit(n); + } + + + template + DequeBase::DequeBase(size_type n, const allocator_type& allocator) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(allocator) + { + // It's important to note that DoInit creates space for elements and assigns + // mItBegin/mItEnd to point to them, but these elements are not constructed. + // You need to immediately follow this constructor with code that constructs the values. + DoInit(n); + } + + + template + DequeBase::~DequeBase() + { + if(mpPtrArray) + { + DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1); + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + mpPtrArray = nullptr; + } + } + + + template + const typename DequeBase::allocator_type& + DequeBase::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + template + typename DequeBase::allocator_type& + DequeBase::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + template + void DequeBase::set_allocator(const allocator_type& allocator) + { + // The only time you can set an allocator is with an empty unused container, such as right after construction. 
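+		// An illustrative sketch (hypothetical user code; otherAllocator is a
+		// placeholder) of the intended pattern:
+		//
+		//     eastl::deque<int> d;             // freshly constructed, still empty
+		//     d.set_allocator(otherAllocator); // OK while no elements have been inserted
+		//     d.push_back(1);                  // element memory now comes from otherAllocator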
+		if(EASTL_LIKELY(mAllocator != allocator))
+		{
+			if(EASTL_LIKELY(mpPtrArray && (mItBegin.mpCurrentArrayPtr == mItEnd.mpCurrentArrayPtr))) // If we are empty and so can safely deallocate the existing memory... We could also test for empty(), but that's a more expensive calculation and more involved clearing, though it would be more flexible.
+			{
+				DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1);
+				DoFreePtrArray(mpPtrArray, mnPtrArraySize);
+
+				mAllocator = allocator;
+				DoInit(0);
+			}
+			else
+			{
+				EASTL_FAIL_MSG("DequeBase::set_allocator -- attempt to change allocator after allocating elements.");
+			}
+		}
+	}
+
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	T* DequeBase<T, Allocator, kDequeSubarraySize>::DoAllocateSubarray()
+	{
+		T* p = (T*)allocate_memory(mAllocator, kDequeSubarraySize * sizeof(T), EASTL_ALIGN_OF(T), 0);
+		EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+		#if EASTL_DEBUG
+			memset((void*)p, 0, kDequeSubarraySize * sizeof(T));
+		#endif
+
+		return (T*)p;
+	}
+
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreeSubarray(T* p)
+	{
+		if(p)
+			EASTLFree(mAllocator, p, kDequeSubarraySize * sizeof(T));
+	}
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreeSubarrays(T** pBegin, T** pEnd)
+	{
+		while(pBegin < pEnd)
+			DoFreeSubarray(*pBegin++);
+	}
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	T** DequeBase<T, Allocator, kDequeSubarraySize>::DoAllocatePtrArray(size_type n)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(n >= 0x80000000))
+				EASTL_FAIL_MSG("deque::DoAllocatePtrArray -- improbably large request.");
+		#endif
+
+		T** pp = (T**)allocate_memory(mAllocator, n * sizeof(T*), EASTL_ALIGN_OF(T), 0);
+		EASTL_ASSERT_MSG(pp != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+		#if EASTL_DEBUG
+			memset((void*)pp, 0, n * sizeof(T*));
+		#endif
+
+		return pp;
+	}
+
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	void DequeBase<T, Allocator, kDequeSubarraySize>::DoFreePtrArray(T** pp, size_t n)
+	{
+		if(pp)
+			EASTLFree(mAllocator, pp, n * sizeof(T*));
+	}
+
+
+	template <typename T, typename Allocator, unsigned kDequeSubarraySize>
+	typename DequeBase<T, Allocator, kDequeSubarraySize>::iterator
+	DequeBase<T, Allocator, kDequeSubarraySize>::DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide)
+	{
+		// nAdditionalCapacity refers to the amount of additional space we need to be
+		// able to store in this deque. Typically this function is called as part of
+		// an insert or append operation. This is the function that makes sure there
+		// is enough capacity for the new elements to be copied into the deque.
+		// The new capacity here is always at the front or back of the deque.
+		// This function returns an iterator that points to the new begin or
+		// the new end of the deque space, depending on allocationSide.
+
+		if(allocationSide == kSideFront)
+		{
+			// There might be some free space (nCurrentAdditionalCapacity) at the front of the existing subarray.
+			const size_type nCurrentAdditionalCapacity = (size_type)(mItBegin.mpCurrent - mItBegin.mpBegin);
+
+			if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow downward into a new subarray...
+			{
+				const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize);
+				difference_type i;
+
+				if(nSubarrayIncrease > (mItBegin.mpCurrentArrayPtr - mpPtrArray)) // If there are not enough pointers in front of the current (first) one...
+ DoReallocPtrArray((size_type)(nSubarrayIncrease - (mItBegin.mpCurrentArrayPtr - mpPtrArray)), kSideFront); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(i = 1; i <= nSubarrayIncrease; ++i) + mItBegin.mpCurrentArrayPtr[-i] = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(difference_type j = 1; j < i; ++j) + DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-j]); + throw; + } + #endif + } + + return mItBegin - (difference_type)nAdditionalCapacity; + } + else // else kSideBack + { + const size_type nCurrentAdditionalCapacity = (size_type)((mItEnd.mpEnd - 1) - mItEnd.mpCurrent); + + if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow forward into a new subarray... + { + const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize); + difference_type i; + + if(nSubarrayIncrease > ((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1) // If there are not enough pointers after the current (last) one... + DoReallocPtrArray((size_type)(nSubarrayIncrease - (((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1)), kSideBack); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(i = 1; i <= nSubarrayIncrease; ++i) + mItEnd.mpCurrentArrayPtr[i] = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(difference_type j = 1; j < i; ++j) + DoFreeSubarray(mItEnd.mpCurrentArrayPtr[j]); + throw; + } + #endif + } + + return mItEnd + (difference_type)nAdditionalCapacity; + } + } + + + template + void DequeBase::DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide) + { + // This function is not called unless the capacity is known to require a resize. + // + // We have an array of pointers (mpPtrArray), of which a segment of them are in use and + // at either end of the array are zero or more unused pointers. This function is being + // called because we need to extend the capacity on either side of this array by + // nAdditionalCapacity pointers. However, it's possible that if the user is continually + // using push_back and pop_front then the pointer array will continue to be extended + // on the back side and unused on the front side. So while we are doing this resizing + // here we also take the opportunity to recenter the pointers and thus be balanced. + // It man turn out that we don't even need to reallocate the pointer array in order + // to increase capacity on one side, as simply moving the pointers to the center may + // be enough to open up the requires space. + // + // Balanced pointer array Unbalanced pointer array (unused space at front, no free space at back) + // ----++++++++++++---- ---------+++++++++++ + + const size_type nUnusedPtrCountAtFront = (size_type)(mItBegin.mpCurrentArrayPtr - mpPtrArray); + const size_type nUsedPtrCount = (size_type)(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) + 1; + const size_type nUsedPtrSpace = nUsedPtrCount * sizeof(void*); + const size_type nUnusedPtrCountAtBack = (mnPtrArraySize - nUnusedPtrCountAtFront) - nUsedPtrCount; + value_type** pPtrArrayBegin; + + if((allocationSide == kSideBack) && (nAdditionalCapacity <= nUnusedPtrCountAtFront)) // If we can take advantage of unused pointers at the front without doing any reallocation... + { + if(nAdditionalCapacity < (nUnusedPtrCountAtFront / 2)) // Possibly use more space than required, if there's a lot of extra space. 
+ nAdditionalCapacity = (nUnusedPtrCountAtFront / 2); + + pPtrArrayBegin = mpPtrArray + (nUnusedPtrCountAtFront - nAdditionalCapacity); + memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + #if EASTL_DEBUG + memset(pPtrArrayBegin + nUsedPtrCount, 0, (size_t)(mpPtrArray + mnPtrArraySize) - (size_t)(pPtrArrayBegin + nUsedPtrCount)); + #endif + } + else if((allocationSide == kSideFront) && (nAdditionalCapacity <= nUnusedPtrCountAtBack)) // If we can take advantage of unused pointers at the back without doing any reallocation... + { + if(nAdditionalCapacity < (nUnusedPtrCountAtBack / 2)) // Possibly use more space than required, if there's a lot of extra space. + nAdditionalCapacity = (nUnusedPtrCountAtBack / 2); + + pPtrArrayBegin = mItBegin.mpCurrentArrayPtr + nAdditionalCapacity; + memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + #if EASTL_DEBUG + memset(mpPtrArray, 0, (size_t)((uintptr_t)pPtrArrayBegin - (uintptr_t)mpPtrArray)); + #endif + } + else + { + // In this case we will have to do a reallocation. + const size_type nNewPtrArraySize = mnPtrArraySize + eastl::max_alt(mnPtrArraySize, nAdditionalCapacity) + 2; // Allocate extra capacity. + value_type** const pNewPtrArray = DoAllocatePtrArray(nNewPtrArraySize); + + pPtrArrayBegin = pNewPtrArray + (mItBegin.mpCurrentArrayPtr - mpPtrArray) + ((allocationSide == kSideFront) ? nAdditionalCapacity : 0); + + // The following is equivalent to: eastl::copy(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1, pPtrArrayBegin); + // It's OK to use memcpy instead of memmove because the destination is guaranteed to non-overlap the source. + if(mpPtrArray) // Could also say: 'if(mItBegin.mpCurrentArrayPtr)' + memcpy(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + + mpPtrArray = pNewPtrArray; + mnPtrArraySize = nNewPtrArraySize; + } + + // We need to reset the begin and end iterators, as code that calls this expects them to *not* be invalidated. + mItBegin.SetSubarray(pPtrArrayBegin); + mItEnd.SetSubarray((pPtrArrayBegin + nUsedPtrCount) - 1); + } + + + template + void DequeBase::DoInit(size_type n) + { + // This code is disabled because it doesn't currently work properly. + // We are trying to make it so that a deque can have a zero allocation + // initial empty state, but we (OK, I) am having a hard time making + // this elegant and efficient. + //if(n) + //{ + const size_type nNewPtrArraySize = (size_type)((n / kDequeSubarraySize) + 1); // Always have at least one, even if n is zero. + const size_type kMinPtrArraySize_ = kMinPtrArraySize; + + mnPtrArraySize = eastl::max_alt(kMinPtrArraySize_, (nNewPtrArraySize + 2)); + mpPtrArray = DoAllocatePtrArray(mnPtrArraySize); + + value_type** const pPtrArrayBegin = (mpPtrArray + ((mnPtrArraySize - nNewPtrArraySize) / 2)); // Try to place it in the middle. + value_type** const pPtrArrayEnd = pPtrArrayBegin + nNewPtrArraySize; + value_type** pPtrArrayCurrent = pPtrArrayBegin; + + #if EASTL_EXCEPTIONS_ENABLED + try + { + try + { + #endif + while(pPtrArrayCurrent < pPtrArrayEnd) + *pPtrArrayCurrent++ = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(pPtrArrayBegin, pPtrArrayCurrent); + throw; + } + } + catch(...) 
+ { + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + mpPtrArray = NULL; + mnPtrArraySize = 0; + throw; + } + #endif + + mItBegin.SetSubarray(pPtrArrayBegin); + mItBegin.mpCurrent = mItBegin.mpBegin; + + mItEnd.SetSubarray(pPtrArrayEnd - 1); + mItEnd.mpCurrent = mItEnd.mpBegin + (difference_type)(n % kDequeSubarraySize); + //} + //else // Else we do a zero-allocation initialization. + //{ + // mpPtrArray = NULL; + // mnPtrArraySize = 0; + // + // mItBegin.mpCurrentArrayPtr = NULL; + // mItBegin.mpBegin = NULL; + // mItBegin.mpEnd = NULL; // We intentionally create a situation whereby the subarray that has no capacity. + // mItBegin.mpCurrent = NULL; + // + // mItEnd = mItBegin; + //} + } + + + + /////////////////////////////////////////////////////////////////////// + // DequeIterator + /////////////////////////////////////////////////////////////////////// + + template + DequeIterator::DequeIterator() + : mpCurrent(NULL), mpBegin(NULL), mpEnd(NULL), mpCurrentArrayPtr(NULL) + { + // Empty + } + + + template + DequeIterator::DequeIterator(T** pCurrentArrayPtr, T* pCurrent) + : mpCurrent(pCurrent), mpBegin(*pCurrentArrayPtr), mpEnd(pCurrent + kDequeSubarraySize), mpCurrentArrayPtr(pCurrentArrayPtr) + { + // Empty + } + + + template + DequeIterator::DequeIterator(const iterator& x) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + // Empty + } + + template + DequeIterator& DequeIterator::operator=(const iterator& x) + { + mpCurrent = x.mpCurrent; + mpBegin = x.mpBegin; + mpEnd = x.mpEnd; + mpCurrentArrayPtr = x.mpCurrentArrayPtr; + + return *this; + } + + + template + DequeIterator::DequeIterator(const iterator& x, Increment) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + operator++(); + } + + + template + DequeIterator::DequeIterator(const iterator& x, Decrement) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + operator--(); + } + + + template + typename DequeIterator::pointer + DequeIterator::operator->() const + { + return mpCurrent; + } + + + template + typename DequeIterator::reference + DequeIterator::operator*() const + { + return *mpCurrent; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator++() + { + if(EASTL_UNLIKELY(++mpCurrent == mpEnd)) + { + mpBegin = *++mpCurrentArrayPtr; + mpEnd = mpBegin + kDequeSubarraySize; + mpCurrent = mpBegin; + } + return *this; + } + + + template + typename DequeIterator::this_type + DequeIterator::operator++(int) + { + const this_type temp(*this); + operator++(); + return temp; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator--() + { + if(EASTL_UNLIKELY(mpCurrent == mpBegin)) + { + mpBegin = *--mpCurrentArrayPtr; + mpEnd = mpBegin + kDequeSubarraySize; + mpCurrent = mpEnd; // fall through... + } + --mpCurrent; + return *this; + } + + + template + typename DequeIterator::this_type + DequeIterator::operator--(int) + { + const this_type temp(*this); + operator--(); + return temp; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator+=(difference_type n) + { + const difference_type subarrayPosition = (mpCurrent - mpBegin) + n; + + // Cast from signed to unsigned (size_t) in order to obviate the need to compare to < 0. + if((size_t)subarrayPosition < (size_t)kDequeSubarraySize) // If the new position is within the current subarray (i.e. >= 0 && < kSubArraySize)... 
+ mpCurrent += n;
+ else
+ {
+ // This implementation is a branchless version which works by offsetting
+ // the math to always be in the positive range. Much of the values here
+ // reduce to constants and both the multiplication and division are of
+ // power of two sizes and so this calculation ends up compiling down to
+ // just one addition, one shift and one subtraction. This algorithm has
+ // a theoretical weakness in that on 32 bit systems it will fail if the
+ // value of n is >= (2^32 - 2^24) or 4,278,190,080 or if kDequeSubarraySize
+ // is >= 2^24 or 16,777,216.
+ EASTL_CT_ASSERT((kDequeSubarraySize & (kDequeSubarraySize - 1)) == 0); // Verify that it is a power of 2.
+ const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize);
+
+ SetSubarray(mpCurrentArrayPtr + subarrayIndex);
+ mpCurrent = mpBegin + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize));
+ }
+ return *this;
+ }
+
+
+ template
+ typename DequeIterator::this_type&
+ DequeIterator::operator-=(difference_type n)
+ {
+ return (*this).operator+=(-n);
+ }
+
+
+ template
+ typename DequeIterator::this_type
+ DequeIterator::operator+(difference_type n) const
+ {
+ return this_type(*this).operator+=(n);
+ }
+
+
+ template
+ typename DequeIterator::this_type
+ DequeIterator::operator-(difference_type n) const
+ {
+ return this_type(*this).operator+=(-n);
+ }
+
+
+ template
+ typename DequeIterator::this_type
+ DequeIterator::move(const iterator& first, const iterator& last, true_type)
+ {
+ // To do: Implement this as a loop which does memcpys between subarrays appropriately.
+ // Currently we only do the memmove if the entire operation occurs within a single subarray.
+ if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memmove.
+ {
+ memmove(mpCurrent, first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
+ return *this + (last.mpCurrent - first.mpCurrent);
+ }
+ return eastl::move(first, last, *this);
+ }
+
+
+ template
+ typename DequeIterator::this_type
+ DequeIterator::move(const iterator& first, const iterator& last, false_type)
+ {
+ return eastl::move(first, last, *this);
+ }
+
+
+ template
+ void DequeIterator::move_backward(const iterator& first, const iterator& last, true_type)
+ {
+ // To do: Implement this as a loop which does memmoves between subarrays appropriately.
+ // Currently we only do the memmove if the entire operation occurs within a single subarray.
+ if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memmove.
+ memmove(mpCurrent - (last.mpCurrent - first.mpCurrent), first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
+ else
+ eastl::move_backward(first, last, *this);
+ }
+
+
+ template
+ void DequeIterator::move_backward(const iterator& first, const iterator& last, false_type)
+ {
+ eastl::move_backward(first, last, *this);
+ }
+
+
+ template
+ void DequeIterator::SetSubarray(T** pCurrentArrayPtr)
+ {
+ mpCurrentArrayPtr = pCurrentArrayPtr;
+ mpBegin = *pCurrentArrayPtr;
+ mpEnd = mpBegin + kDequeSubarraySize;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template parameters here to support this.
The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent == b.mpCurrent; + } + + + template + inline bool operator!=(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent != b.mpCurrent; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent != b.mpCurrent; + } + + + template + inline bool operator<(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent < b.mpCurrent) : (a.mpCurrentArrayPtr < b.mpCurrentArrayPtr); + } + + + template + inline bool operator>(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent > b.mpCurrent) : (a.mpCurrentArrayPtr > b.mpCurrentArrayPtr); + } + + + template + inline bool operator<=(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent <= b.mpCurrent) : (a.mpCurrentArrayPtr <= b.mpCurrentArrayPtr); + } + + + template + inline bool operator>=(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent >= b.mpCurrent) : (a.mpCurrentArrayPtr >= b.mpCurrentArrayPtr); + } + + + // Random access iterators must support operator + and operator -. + // You can only add an integer to an iterator, and you cannot add two iterators. + template + inline DequeIterator + operator+(ptrdiff_t n, const DequeIterator& x) + { + return x + n; // Implement (n + x) in terms of (x + n). + } + + + // You can only add an integer to an iterator, but you can subtract two iterators. + // The C++ defect report #179 mentioned above specifically refers to + // operator - and states that we support the subtraction of const and non-const iterators. 
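+ // Illustrative worked example (not part of the original source) of the distance
+ // computation implemented just below, assuming kDequeSubarraySize == 8: let b point
+ // at offset 5 of subarray 0 and a point at offset 2 of subarray 2. Then:
+ // 8 * ((2 - 0) - 1) // the one full subarray strictly between them -> 8
+ // + (2) // a.mpCurrent - a.mpBegin, elements used in a's subarray -> 2
+ // + (8 - 5) // b.mpEnd - b.mpCurrent, elements remaining in b's subarray -> 3
+ // = 13, i.e. the 3 + 8 + 2 elements in the range [b, a).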
+ template
+ inline typename DequeIterator::difference_type
+ operator-(const DequeIterator& a,
+ const DequeIterator& b)
+ {
+ // This is a fairly clever algorithm that has been used in STL deque implementations since the original HP STL:
+ typedef typename DequeIterator::difference_type difference_type;
+
+ return ((difference_type)kDequeSubarraySize * ((a.mpCurrentArrayPtr - b.mpCurrentArrayPtr) - 1)) + (a.mpCurrent - a.mpBegin) + (b.mpEnd - b.mpCurrent);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // deque
+ ///////////////////////////////////////////////////////////////////////
+
+ template
+ inline deque::deque()
+ : base_type((size_type)0)
+ {
+ // Empty
+ }
+
+
+ template
+ inline deque::deque(const allocator_type& allocator)
+ : base_type((size_type)0, allocator)
+ {
+ // Empty
+ }
+
+
+ template
+ inline deque::deque(size_type n, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ DoFillInit(value_type());
+ }
+
+
+ template
+ inline deque::deque(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ DoFillInit(value);
+ }
+
+
+ template
+ inline deque::deque(const this_type& x)
+ : base_type(x.size(), x.mAllocator)
+ {
+ eastl::uninitialized_copy(x.mItBegin, x.mItEnd, mItBegin);
+ }
+
+
+ template
+ inline deque::deque(this_type&& x)
+ : base_type((size_type)0, x.mAllocator)
+ {
+ swap(x);
+ }
+
+
+ template
+ inline deque::deque(this_type&& x, const allocator_type& allocator)
+ : base_type((size_type)0, allocator)
+ {
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+
+
+ template
+ inline deque::deque(std::initializer_list ilist, const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ DoInit(ilist.begin(), ilist.end(), false_type());
+ }
+
+
+ template
+ template
+ inline deque::deque(InputIterator first, InputIterator last)
+ : base_type(EASTL_DEQUE_DEFAULT_ALLOCATOR) // Call the empty base constructor, which does nothing. We need to do all the work in our own DoInit.
+ {
+ DoInit(first, last, is_integral());
+ }
+
+
+ template
+ inline deque::~deque()
+ {
+ // Call destructors. Parent class will free the memory.
+ for(iterator itCurrent(mItBegin); itCurrent != mItEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ }
+
+
+ template
+ typename deque::this_type&
+ deque::operator=(const this_type& x)
+ {
+ if(&x != this) // If not assigning to ourselves...
+ {
+ // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an
+ // allocator that's unequal to x's allocator, we need to deallocate our elements using
+ // our current allocator and then reallocate them using x's allocator. If the allocators
+ // are equal then we can use a more optimal algorithm that doesn't reallocate our elements
+ // but instead can copy them in place.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ bool bSlowerPathwayRequired = (mAllocator != x.mAllocator);
+ #else
+ bool bSlowerPathwayRequired = false;
+ #endif
+
+ if(bSlowerPathwayRequired)
+ {
+ // We can't currently use set_capacity(0) or shrink_to_fit, because they
+ // leave a remaining allocation with our old allocator. So we do a similar
+ // thing but set our allocator to x.mAllocator while doing so.
+ this_type temp(x.mAllocator);
+ DoSwap(temp);
+ // Now we have an empty container with an allocator equal to x.mAllocator, ready to assign from x.
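+ // Illustrative note (not part of the original source), using a hypothetical
+ // stateful allocator type 'ArenaAllocator' with instances arenaA/arenaB:
+ // eastl::deque<int, ArenaAllocator> a(arenaA), b(arenaB);
+ // a = b; // unequal allocators take this pathway: a's old storage is
+ // // released to arenaA and the copies are allocated from arenaB.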
+ } + + DoAssign(x.begin(), x.end(), eastl::false_type()); + } + + return *this; + } + + + template + inline typename deque::this_type& + deque::operator=(this_type&& x) + { + if(this != &x) + { + this_type temp(mAllocator); + swap(temp); + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + + template + inline typename deque::this_type& + deque::operator=(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + return *this; + } + + + template + inline void deque::assign(size_type n, const value_type& value) + { + DoAssignValues(n, value); + } + + + template + inline void deque::assign(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + } + + + // It turns out that the C++ std::deque specifies a two argument + // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. + template + template + inline void deque::assign(InputIterator first, InputIterator last) + { + DoAssign(first, last, is_integral()); + } + + + template + inline typename deque::iterator + deque::begin() EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::const_iterator + deque::begin() const EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::const_iterator + deque::cbegin() const EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::iterator + deque::end() EA_NOEXCEPT + { + return mItEnd; + } + + + template + typename deque::const_iterator + deque::end() const EA_NOEXCEPT + { + return mItEnd; + } + + + template + inline typename deque::const_iterator + deque::cend() const EA_NOEXCEPT + { + return mItEnd; + } + + + template + inline typename deque::reverse_iterator + deque::rbegin() EA_NOEXCEPT + { + return reverse_iterator(mItEnd); + } + + + template + inline typename deque::const_reverse_iterator + deque::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(mItEnd); + } + + + template + inline typename deque::const_reverse_iterator + deque::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(mItEnd); + } + + + template + inline typename deque::reverse_iterator + deque::rend() EA_NOEXCEPT + { + return reverse_iterator(mItBegin); + } + + + template + inline typename deque::const_reverse_iterator + deque::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(mItBegin); + } + + + template + inline typename deque::const_reverse_iterator + deque::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(mItBegin); + } + + + template + inline bool deque::empty() const EA_NOEXCEPT + { + return mItBegin.mpCurrent == mItEnd.mpCurrent; + } + + + template + typename deque::size_type + inline deque::size() const EA_NOEXCEPT + { + return (size_type)(mItEnd - mItBegin); + } + + + template + inline void deque::resize(size_type n, const value_type& value) + { + const size_type nSizeCurrent = size(); + + if(n > nSizeCurrent) // We expect that more often than not, resizes will be upsizes. 
+ insert(mItEnd, n - nSizeCurrent, value); + else + erase(mItBegin + (difference_type)n, mItEnd); + } + + + template + inline void deque::resize(size_type n) + { + resize(n, value_type()); + } + + + template + inline void deque::shrink_to_fit() + { + this_type x(eastl::make_move_iterator(begin()), eastl::make_move_iterator(end())); + swap(x); + } + + + template + inline void deque::set_capacity(size_type n) + { + // Currently there isn't a way to remove all allocations from a deque, as it + // requires a single starting allocation for the subarrays. So we can't just + // free all memory without leaving it in a bad state. So the best means of + // implementing set_capacity() is to do what we do below. + + if(n == 0) + { + this_type temp(mAllocator); + DoSwap(temp); + } + else if(n < size()) + { + // We currently ignore the request to reduce capacity. To do: Implement this + // and do it in a way that doesn't result in temporarily ~doubling our memory usage. + // That might involve trimming unused subarrays from the front or back of + // the container. + resize(n); + } + } + + + template + typename deque::reference + deque::operator[](size_type n) + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #elif EASTL_ASSERT_ENABLED + // We allow taking a reference to deque[0] + if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #endif + + // See DequeIterator::operator+=() for an explanation of the code below. + iterator it(mItBegin); + + const difference_type subarrayPosition = (difference_type)((it.mpCurrent - it.mpBegin) + (difference_type)n); + const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize); + + return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize))); + } + + + template + typename deque::const_reference + deque::operator[](size_type n) const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #elif EASTL_ASSERT_ENABLED + // We allow the user to use a reference to deque[0] of an empty container. + if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #endif + + // See DequeIterator::operator+=() for an explanation of the code below. 
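+ // Illustrative worked example (not part of the original source), assuming
+ // kDequeSubarraySize == 8, mItBegin at offset 6 of its subarray, and n == 5:
+ // subarrayPosition = 6 + 5 = 11
+ // subarrayIndex = (16777216 + 11) / 8 - 16777216 / 8 = 2097153 - 2097152 = 1
+ // element = *(subarray pointer + 1) at offset 11 - (1 * 8) = 3
+ // The 2^24 bias keeps the dividend positive so the division behaves like a
+ // floor even when subarrayPosition is negative (possible in operator+=).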
+ iterator it(mItBegin); + + const difference_type subarrayPosition = (it.mpCurrent - it.mpBegin) + (difference_type)n; + const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize); + + return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize))); + } + + + template + typename deque::reference + deque::at(size_type n) + { + #if EASTL_EXCEPTIONS_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + throw std::out_of_range("deque::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + EASTL_FAIL_MSG("deque::at -- out of range"); + #endif + return *(mItBegin.operator+((difference_type)n)); + } + + + template + typename deque::const_reference + deque::at(size_type n) const + { + #if EASTL_EXCEPTIONS_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + throw std::out_of_range("deque::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + EASTL_FAIL_MSG("deque::at -- out of range"); + #endif + return *(mItBegin.operator+((difference_type)n)); + } + + + template + typename deque::reference + deque::front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::front -- empty deque"); + #else + // We allow the user to reference an empty container. + #endif + + return *mItBegin; + } + + + template + typename deque::const_reference + deque::front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::front -- empty deque"); + #else + // We allow the user to reference an empty container. + #endif + + return *mItBegin; + } + + + template + typename deque::reference + deque::back() + { + #if EASTL_ASSERT_ENABLED + // Decrementing an iterator with an empty container will result in undefined behaviour. + // specifically: the iterator decrement will apply pointer arithmetic to a nullptr (depending on the situation either mpCurrentArrayPtr or mpBegin). + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::back -- empty deque"); + #endif + + return *iterator(mItEnd, typename iterator::Decrement()); + } + + + template + typename deque::const_reference + deque::back() const + { + #if EASTL_ASSERT_ENABLED + // Decrementing an iterator with an empty container will result in undefined behaviour. + // specifically: the iterator decrement will apply pointer arithmetic to a nullptr (depending on the situation either mpCurrentArrayPtr or mpBegin). 
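+ // Illustrative note (not part of the original source):
+ // eastl::deque<int> d; // empty, so mItBegin == mItEnd
+ // int& x = d.back(); // would decrement through null subarray pointers; the assert below catches this in debug builds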
+ if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::back -- empty deque"); + #endif + + return *iterator(mItEnd, typename iterator::Decrement()); + } + + + template + void deque::push_front(const value_type& value) + { + emplace_front(value); + } + + + template + void deque::push_front(value_type&& value) + { + emplace_front(eastl::move(value)); + } + + + template + typename deque::reference + deque::push_front() + { + emplace_front(value_type()); + return *mItBegin; // Same as return front(); + } + + + template + void deque::push_back(const value_type& value) + { + emplace_back(value); + } + + + template + void deque::push_back(value_type&& value) + { + emplace_back(eastl::move(value)); + } + + + template + typename deque::reference + deque::push_back() + { + emplace_back(value_type()); + return *iterator(mItEnd, typename iterator::Decrement()); // Same thing as return back(); + } + + + template + void deque::pop_front() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::pop_front -- empty deque"); + #endif + + if((mItBegin.mpCurrent + 1) != mItBegin.mpEnd) // If the operation is very simple... + (mItBegin.mpCurrent++)->~value_type(); + else + { + // This is executed only when we are popping the end (last) item off the front-most subarray. + // In this case we need to free the subarray and point mItBegin to the next subarray. + #ifdef EA_DEBUG + value_type** pp = mItBegin.mpCurrentArrayPtr; + #endif + + mItBegin.mpCurrent->~value_type(); // mpCurrent == mpEnd - 1 + DoFreeSubarray(mItBegin.mpBegin); + mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr + 1); + mItBegin.mpCurrent = mItBegin.mpBegin; + + #ifdef EA_DEBUG + *pp = NULL; + #endif + } + } + + + template + void deque::pop_back() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::pop_back -- empty deque"); + #endif + + if(mItEnd.mpCurrent != mItEnd.mpBegin) // If the operation is very simple... + (--mItEnd.mpCurrent)->~value_type(); + else + { + // This is executed only when we are popping the first item off the last subarray. + // In this case we need to free the subarray and point mItEnd to the previous subarray. + #ifdef EA_DEBUG + value_type** pp = mItEnd.mpCurrentArrayPtr; + #endif + + DoFreeSubarray(mItEnd.mpBegin); + mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr - 1); + mItEnd.mpCurrent = mItEnd.mpEnd - 1; // Recall that mItEnd points to one-past the last item in the container. + mItEnd.mpCurrent->~value_type(); // Thus we need to call the destructor on the item *before* that last item. + + #ifdef EA_DEBUG + *pp = NULL; + #endif + } + } + + + template + template + typename deque::iterator + deque::emplace(const_iterator position, Args&&... args) + { + if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If we are doing the same thing as push_back... + { + emplace_back(eastl::forward(args)...); + return iterator(mItEnd, typename iterator::Decrement()); // Unfortunately, we need to make an iterator here, as the above push_back is an operation that can invalidate existing iterators. + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItBegin.mpCurrent)) // If we are doing the same thing as push_front... + { + emplace_front(eastl::forward(args)...); + return mItBegin; + } + + iterator itPosition(position, typename iterator::FromConst()); + value_type valueSaved(eastl::forward(args)...); // We need to save this because value may come from within our container. 
+ // It would be somewhat tedious to make a workaround that could avoid this.
+ const difference_type i(itPosition - mItBegin);
+
+ #if EASTL_ASSERT_ENABLED
+ EASTL_ASSERT(!empty()); // The push_front and push_back calls below assume that we are non-empty. It turns out emplace is never called on an empty deque, as those cases are handled by the pathways above.
+
+ if(EASTL_UNLIKELY(!(validate_iterator(itPosition) & isf_valid)))
+ EASTL_FAIL_MSG("deque::emplace -- invalid iterator");
+ #endif
+
+ if(i < (difference_type)(size() / 2)) // Should we insert at the front or at the back? We divide the range in half.
+ {
+ emplace_front(eastl::move(*mItBegin)); // This operation potentially invalidates all existing iterators and so we need to assign them anew relative to mItBegin below.
+
+ itPosition = mItBegin + i;
+
+ const iterator newPosition (itPosition, typename iterator::Increment());
+ iterator oldBegin (mItBegin, typename iterator::Increment());
+ const iterator oldBeginPlus1(oldBegin, typename iterator::Increment());
+
+ oldBegin.move(oldBeginPlus1, newPosition, eastl::is_trivially_copyable());
+ }
+ else
+ {
+ emplace_back(eastl::move(*iterator(mItEnd, typename iterator::Decrement())));
+
+ itPosition = mItBegin + i;
+
+ iterator oldBack (mItEnd, typename iterator::Decrement());
+ const iterator oldBackMinus1(oldBack, typename iterator::Decrement());
+
+ oldBack.move_backward(itPosition, oldBackMinus1, eastl::is_trivially_copyable());
+ }
+
+ *itPosition = eastl::move(valueSaved);
+
+ return itPosition;
+ }
+
+ template
+ template
+ typename deque::reference deque::emplace_front(Args&&... args)
+ {
+ if(mItBegin.mpCurrent != mItBegin.mpBegin) // If we have room in the first subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster.
+ ::new((void*)--mItBegin.mpCurrent) value_type(eastl::forward(args)...); // Construct in place. If args is a single arg of type value_type&& then this will be a move construction.
+ else
+ {
+ // To consider: Detect if value isn't coming from within this container and handle that efficiently.
+ value_type valueSaved(eastl::forward(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. But we can use move instead of copy.
+
+ if(mItBegin.mpCurrentArrayPtr == mpPtrArray) // If there are no more pointers in front of the current (first) one...
+ DoReallocPtrArray(1, kSideFront);
+
+ mItBegin.mpCurrentArrayPtr[-1] = DoAllocateSubarray();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr - 1);
+ mItBegin.mpCurrent = mItBegin.mpEnd - 1;
+ ::new((void*)mItBegin.mpCurrent) value_type(eastl::move(valueSaved));
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ ++mItBegin; // The exception could only occur in the new operation above, after we have decremented mItBegin. So we need to undo it.
+ DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-1]);
+ throw;
+ }
+ #endif
+ }
+
+ return *mItBegin; // Same as return front();
+ }
+
+ template
+ template
+ typename deque::reference deque::emplace_back(Args&&... args)
+ {
+ if ((mItEnd.mpCurrent + 1) != mItEnd.mpEnd) // If we have room in the last subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster.
+ {
+ reference back = *mItEnd.mpCurrent;
+ ::new((void*)mItEnd.mpCurrent++) value_type(eastl::forward(args)...); // Construct in place. If args is a single arg of type value_type&& then this will be a move construction.
+ return back; + } + else + { + // To consider: Detect if value isn't coming from within this container and handle that efficiently. + value_type valueSaved(eastl::forward(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. But we can use move instead of copy. + if(((mItEnd.mpCurrentArrayPtr - mpPtrArray) + 1) >= (difference_type)mnPtrArraySize) // If there are no more pointers after the current (last) one. + DoReallocPtrArray(1, kSideBack); + + mItEnd.mpCurrentArrayPtr[1] = DoAllocateSubarray(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new((void*)mItEnd.mpCurrent) value_type(eastl::move(valueSaved)); // We can move valueSaved into position. + mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr + 1); + mItEnd.mpCurrent = mItEnd.mpBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + // No need to execute '--mItEnd', as the exception could only occur in the new operation above before we set mItEnd. + DoFreeSubarray(mItEnd.mpCurrentArrayPtr[1]); + throw; + } + #endif + + return *iterator(mItEnd, typename iterator::Decrement()); // Same as return back(); + } + } + + + template + typename deque::iterator + deque::insert(const_iterator position, const value_type& value) + { + return emplace(position, value); + } + + + template + typename deque::iterator + deque::insert(const_iterator position, value_type&& value) + { + return emplace(position, eastl::move(value)); + } + + + template + typename deque::iterator + deque::insert(const_iterator position, size_type n, const value_type& value) + { + return DoInsertValues(position, n, value); + } + + + template + template + typename deque::iterator + deque::insert(const_iterator position, InputIterator first, InputIterator last) + { + return DoInsert(position, first, last, is_integral()); // The C++ standard requires this sort of behaviour, as InputIterator might actually be Integer and 'first' is really 'count' and 'last' is really 'value'. + } + + + template + typename deque::iterator + deque::insert(const_iterator position, std::initializer_list ilist) + { + const difference_type i(position - mItBegin); + DoInsert(position, ilist.begin(), ilist.end(), false_type()); + return (mItBegin + i); + } + + + template + typename deque::iterator + deque::erase(const_iterator position) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + + if(EASTL_UNLIKELY(position == end())) + EASTL_FAIL_MSG("deque::erase -- end() iterator is an invalid iterator for erase"); + #endif + + iterator itPosition(position, typename iterator::FromConst()); + iterator itNext(itPosition, typename iterator::Increment()); + const difference_type i(itPosition - mItBegin); + + if(i < (difference_type)(size() / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half. 
+ { + itNext.move_backward(mItBegin, itPosition, eastl::is_trivially_copyable()); + pop_front(); + } + else + { + itPosition.move(itNext, mItEnd, eastl::is_trivially_copyable()); + pop_back(); + } + + return mItBegin + i; + } + + + template + typename deque::iterator + deque::erase(const_iterator first, const_iterator last) + { + iterator itFirst(first, typename iterator::FromConst()); + iterator itLast(last, typename iterator::FromConst()); + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(itFirst) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + if(EASTL_UNLIKELY(!(validate_iterator(itLast) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + #endif + + if((itFirst != mItBegin) || (itLast != mItEnd)) // If not erasing everything... (We expect that the user won't call erase(begin, end) because instead the user would just call clear.) + { + const difference_type n(itLast - itFirst); + const difference_type i(itFirst - mItBegin); + + if(i < (difference_type)((size() - n) / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half. + { + const iterator itNewBegin(mItBegin + n); + value_type** const pPtrArrayBegin = mItBegin.mpCurrentArrayPtr; + + itLast.move_backward(mItBegin, itFirst, eastl::is_trivially_copyable()); + + for(; mItBegin != itNewBegin; ++mItBegin) // Question: If value_type is a POD type, will the compiler generate this loop at all? + mItBegin.mpCurrent->~value_type(); // If so, then we need to make a specialization for destructing PODs. + + DoFreeSubarrays(pPtrArrayBegin, itNewBegin.mpCurrentArrayPtr); + + // mItBegin = itNewBegin; <-- Not necessary, as the above loop makes it so already. + } + else // Else we will be moving back entries backward. + { + iterator itNewEnd(mItEnd - n); + value_type** const pPtrArrayEnd = itNewEnd.mpCurrentArrayPtr + 1; + + itFirst.move(itLast, mItEnd, eastl::is_trivially_copyable()); + + for(iterator itTemp(itNewEnd); itTemp != mItEnd; ++itTemp) + itTemp.mpCurrent->~value_type(); + + DoFreeSubarrays(pPtrArrayEnd, mItEnd.mpCurrentArrayPtr + 1); + + mItEnd = itNewEnd; + } + + return mItBegin + i; + } + + clear(); + return mItEnd; + } + + + template + typename deque::reverse_iterator + deque::erase(reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename deque::reverse_iterator + deque::erase(reverse_iterator first, reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase(last.base(), first.base())); + } + + + template + void deque::clear() + { + // Destroy all values and all subarrays they belong to, except for the first one, + // as we need to reserve some space for a valid mItBegin/mItEnd. + if(mItBegin.mpCurrentArrayPtr != mItEnd.mpCurrentArrayPtr) // If there are multiple subarrays (more often than not, this will be so)... + { + for(value_type* p1 = mItBegin.mpCurrent; p1 < mItBegin.mpEnd; ++p1) + p1->~value_type(); + for(value_type* p2 = mItEnd.mpBegin; p2 < mItEnd.mpCurrent; ++p2) + p2->~value_type(); + DoFreeSubarray(mItEnd.mpBegin); // Leave mItBegin with a valid subarray. 
+ } + else + { + for(value_type* p = mItBegin.mpCurrent; p < mItEnd.mpCurrent; ++p) + p->~value_type(); + // Don't free the one existing subarray, as we need it for mItBegin/mItEnd. + } + + for(value_type** pPtrArray = mItBegin.mpCurrentArrayPtr + 1; pPtrArray < mItEnd.mpCurrentArrayPtr; ++pPtrArray) + { + for(value_type* p = *pPtrArray, *pEnd = *pPtrArray + kDequeSubarraySize; p < pEnd; ++p) + p->~value_type(); + DoFreeSubarray(*pPtrArray); + } + + mItEnd = mItBegin; // mItBegin/mItEnd will not be dereferencable. + } + + + //template + //void deque::reset_lose_memory() + //{ + // // The reset_lose_memory function is a special extension function which unilaterally + // // resets the container to an empty state without freeing the memory of + // // the contained objects. This is useful for very quickly tearing down a + // // container built into scratch memory. + // + // // Currently we are unable to get this reset_lose_memory operation to work correctly + // // as we haven't been able to find a good way to have a deque initialize + // // without allocating memory. We can lose the old memory, but DoInit + // // would necessarily do a ptrArray allocation. And this is not within + // // our definition of how reset_lose_memory works. + // base_type::DoInit(0); + // + //} + + + template + void deque::swap(deque& x) + { + #if defined(EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR) && EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + if(mAllocator == x.mAllocator) // If allocators are equivalent... + DoSwap(x); + else // else swap the contents. + { + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + #else + // NOTE(rparolin): The previous implementation required T to be copy-constructible in the fall-back case where + // allocators with unique instances copied elements. This was an unnecessary restriction and prevented the common + // usage of deque with non-copyable types (eg. eastl::deque or eastl::deque). + // + // The previous implementation violated the following requirements of deque::swap so the fall-back code has + // been removed. EASTL implicitly defines 'propagate_on_container_swap = false' therefore the fall-back case is + // undefined behaviour. We simply swap the contents and the allocator as that is the common expectation of + // users and does not put the container into an invalid state since it can not free its memory via its current + // allocator instance. + // + DoSwap(x); + #endif + } + + + template + template + void deque::DoInit(Integer n, Integer value, true_type) + { + base_type::DoInit(n); // Call the base uninitialized init function. + DoFillInit(value); + } + + + template + template + void deque::DoInit(InputIterator first, InputIterator last, false_type) + { + typedef typename eastl::iterator_traits::iterator_category IC; + DoInitFromIterator(first, last, IC()); + } + + + template + template + void deque::DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag) + { + base_type::DoInit(0); // Call the base uninitialized init function, but don't actually allocate any values. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have little choice but to iterate through the source iterator and call + // push_back for each item. It can be slow because it will keep reallocating the + // container memory as we go (every kDequeSubarraySize elements). We are not allowed to use distance() on an InputIterator. 
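+ // Illustrative example of a source that forces this pathway (not part of the
+ // original source); istream iterators are single-pass InputIterators:
+ // std::istringstream src("1 2 3"); // needs <sstream> and <iterator>
+ // std::istream_iterator<int> first(src), last;
+ // eastl::deque<int> d(first, last); // elements are push_back'd one at a time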
+ for(; first != last; ++first) // InputIterators by definition actually only allow you to iterate through them once.
+ { // Thus the standard *requires* that we do this (inefficient) implementation.
+ push_back(*first); // Luckily, InputIterators are in practice almost never used, so this code will likely never get executed.
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ throw;
+ }
+ #endif
+ }
+
+
+ template
+ template
+ void deque::DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ typedef typename eastl::remove_const::type non_const_iterator_type; // If T is a const type (e.g. const int) then we need to initialize it as if it were non-const.
+ typedef typename eastl::remove_const::type non_const_value_type;
+
+ const size_type n = (size_type)eastl::distance(first, last);
+ value_type** pPtrArrayCurrent;
+
+ base_type::DoInit(n); // Call the base uninitialized init function.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr; pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr; ++pPtrArrayCurrent) // Copy to the known-to-be-completely-used subarrays.
+ {
+ // We implement an algorithm here whereby we use uninitialized_copy() and advance() instead of just iterating from first to last and constructing as we go.
+ // The reason for this is that we can take advantage of trivially copyable data types and implement construction as memcpy operations.
+ ForwardIterator current(first); // To do: Implement a specialization of this algorithm for non-trivially copyable types which eliminates the need for 'current'.
+
+ eastl::advance(current, kDequeSubarraySize);
+ eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)current, (non_const_value_type*)*pPtrArrayCurrent);
+ first = current;
+ }
+
+ eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)last, (non_const_value_type*)mItEnd.mpBegin);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ template
+ void deque::DoFillInit(const value_type& value)
+ {
+ value_type** pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr;
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ while(pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr)
+ {
+ eastl::uninitialized_fill(*pPtrArrayCurrent, *pPtrArrayCurrent + kDequeSubarraySize, value);
+ ++pPtrArrayCurrent;
+ }
+ eastl::uninitialized_fill(mItEnd.mpBegin, mItEnd.mpCurrent, value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent)
+ itCurrent.mpCurrent->~value_type();
+ throw;
+ }
+ #endif
+ }
+
+
+ template
+ template
+ void deque::DoAssign(Integer n, Integer value, true_type) // true_type means this is the integer version instead of the iterator version.
+ {
+ DoAssignValues(static_cast(n), static_cast(value));
+ }
+
+
+ template
+ template
+ void deque::DoAssign(InputIterator first, InputIterator last, false_type) // false_type means this is the iterator version instead of integer version.
+ {
+ // Actually, the implementation below requires first/last to be a ForwardIterator and not just an InputIterator.
+ // But contact Paul Pedriana if you somehow need to work with an InputIterator, and we can deal with it.
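+ // Illustrative example of the two pathways below (not part of the original source):
+ // eastl::deque<int> d = {1, 2, 3, 4, 5}; // size() == 5
+ // const int src[] = {9, 9};
+ // d.assign(src, src + 2); // n (2) < size() (5): copies over the first two
+ // // elements, then erases the trailing three -> {9, 9}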
+ const size_type n = (size_type)eastl::distance(first, last); + const size_type nSize = size(); + + if(n > nSize) // If we are increasing the size... + { + InputIterator atEnd(first); + + eastl::advance(atEnd, (difference_type)nSize); + eastl::copy(first, atEnd, mItBegin); + insert(mItEnd, atEnd, last); + } + else // n is <= size. + { + iterator itEnd(eastl::copy(first, last, mItBegin)); + + if(n < nSize) // If we need to erase any trailing elements... + erase(itEnd, mItEnd); + } + } + + + template + void deque::DoAssignValues(size_type n, const value_type& value) + { + const size_type nSize = size(); + + if(n > nSize) // If we are increasing the size... + { + eastl::fill(mItBegin, mItEnd, value); + insert(mItEnd, n - nSize, value); + } + else + { + erase(mItBegin + (difference_type)n, mItEnd); + eastl::fill(mItBegin, mItEnd, value); + } + } + + + template + template + typename deque::iterator + deque::DoInsert(const const_iterator& position, Integer n, Integer value, true_type) + { + return DoInsertValues(position, (size_type)n, (value_type)value); + } + + + template + template + typename deque::iterator + deque::DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return DoInsertFromIterator(position, first, last, IC()); + } + + template + template + typename deque::iterator + deque::DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, EASTL_ITC_NS::input_iterator_tag) + { + const difference_type index = eastl::distance(cbegin(), position); +#if EASTL_EXCEPTIONS_ENABLED + try + { +#endif + // We have little choice but to iterate through the source iterator and call + // insert for each item. It can be slow because it will keep reallocating the + // container memory as we go (every kDequeSubarraySize elements). We are not + // allowed to use distance() on an InputIterator. InputIterators by definition + // actually only allow you to iterate through them once. Thus the standard + // *requires* that we do this (inefficient) implementation. Luckily, + // InputIterators are in practice almost never used, so this code will likely + // never get executed. + for (InputIterator iter = first; iter != last; ++iter) + { + position = insert(position, *iter) + 1; + } +#if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + erase(cbegin() + index, position); + throw; + } +#endif + + return begin() + index; + } + + template + template + typename deque::iterator + deque::DoInsertFromIterator(const_iterator position, const ForwardIterator& first, const ForwardIterator& last, EASTL_ITC_NS::forward_iterator_tag) + { + const size_type n = (size_type)eastl::distance(first, last); + + // This implementation is nearly identical to DoInsertValues below. + // If you make a bug fix to one, you will likely want to fix the other. + if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning or into an empty container... + { + iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We would like to use move here instead of copy when possible, which would be useful for + // when inserting from a std::initializer_list, for example. + // To do: solve this by having a template or runtime parameter which specifies move vs copy. 
+ eastl::uninitialized_copy(first, last, itNewBegin); + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + + return mItBegin; + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. appending)... + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); // mItEnd to itNewEnd refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + const iterator itFirstInserted(mItEnd); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We would like to use move here instead of copy when possible, which would be useful for + // when inserting from a std::initializer_list, for example. + // To do: solve this by having a template or runtime parameter which specifies move vs copy. + eastl::uninitialized_copy(first, last, mItEnd); + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + + return itFirstInserted; + } + else + { + const difference_type nInsertionIndex = position - mItBegin; + const size_type nSize = size(); + + if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front. + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + const iterator itOldBegin(mItBegin); + const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from + // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle + // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this. + if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area... + { + iterator itUCopyEnd(mItBegin + (difference_type)n); + + eastl::uninitialized_copy(mItBegin, itUCopyEnd, itNewBegin); // This can throw. + itUCopyEnd = eastl::copy(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else. + eastl::copy(first, last, itUCopyEnd); + } + else // Else the newly inserted items are going within the newly allocated area at the front. + { + ForwardIterator mid(first); + + eastl::advance(mid, (difference_type)n - nInsertionIndex); + eastl::uninitialized_copy_copy(mItBegin, itPosition, first, mid, itNewBegin); // This can throw. + eastl::copy(mid, last, itOldBegin); + } + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + } + else + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + const iterator itOldEnd(mItEnd); + const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex; + const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators. 
+ + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from + // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle + // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this. + if(nPushedCount > (difference_type)n) + { + const iterator itUCopyEnd(mItEnd - (difference_type)n); + + eastl::uninitialized_copy(itUCopyEnd, mItEnd, mItEnd); + eastl::copy_backward(itPosition, itUCopyEnd, itOldEnd); + eastl::copy(first, last, itPosition); + } + else + { + ForwardIterator mid(first); + + eastl::advance(mid, nPushedCount); + eastl::uninitialized_copy_copy(mid, last, itPosition, mItEnd, mItEnd); + eastl::copy(first, mid, itPosition); + } + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + } + + return iterator(mItBegin + nInsertionIndex); + } + } + + + template + typename deque::iterator + deque::DoInsertValues(const_iterator position, size_type n, const value_type& value) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid))) + EASTL_FAIL_MSG("deque::insert -- invalid iterator"); + #endif + + // This implementation is nearly identical to DoInsertFromIterator above. + // If you make a bug fix to one, you will likely want to fix the other. + if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning... + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // Note that we don't make a temp copy of 'value' here. This is because in a + // deque, insertion at either the front or back doesn't cause a reallocation + // or move of data in the middle. That's a key feature of deques, in fact. + eastl::uninitialized_fill(itNewBegin, mItBegin, value); + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + + return mItBegin; + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. appending)... + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + const iterator itFirstInserted(mItEnd); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // Note that we don't make a temp copy of 'value' here. This is because in a + // deque, insertion at either the front or back doesn't cause a reallocation + // or move of data in the middle. That's a key feature of deques, in fact. + eastl::uninitialized_fill(mItEnd, itNewEnd, value); + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + + return itFirstInserted; + } + else + { + // A key purpose of a deque is to implement insertions and removals more efficiently + // than with a vector. We are inserting into the middle of the deque here. A quick and + // dirty implementation of this would be to reallocate the subarrays and simply push + // all values in the middle upward like you would do with a vector. 
Instead we implement + // the minimum amount of reallocations needed but may need to do some value moving, + // as the subarray sizes need to remain constant and can have no holes in them. + const difference_type nInsertionIndex = position - mItBegin; + const size_type nSize = size(); + const value_type valueSaved(value); + + if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front. + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); + const iterator itOldBegin(mItBegin); + const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area... + { + iterator itUCopyEnd(mItBegin + (difference_type)n); + + eastl::uninitialized_move_if_noexcept(mItBegin, itUCopyEnd, itNewBegin); // This can throw. + itUCopyEnd = eastl::move(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else. + eastl::fill(itUCopyEnd, itPosition, valueSaved); + } + else // Else the newly inserted items are going within the newly allocated area at the front. + { + eastl::uninitialized_move_fill(mItBegin, itPosition, itNewBegin, mItBegin, valueSaved); // This can throw. + eastl::fill(itOldBegin, itPosition, valueSaved); + } + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + + return iterator(mItBegin + nInsertionIndex); + } + else // Else the insertion index is in the back half of the deque, so grow the deque at the back. + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + const iterator itOldEnd(mItEnd); + const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex; + const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(nPushedCount > (difference_type)n) // If the newly inserted items will be entirely within the old area... + { + iterator itUCopyEnd(mItEnd - (difference_type)n); + + eastl::uninitialized_move_if_noexcept(itUCopyEnd, mItEnd, mItEnd); // This can throw. + itUCopyEnd = eastl::move_backward(itPosition, itUCopyEnd, itOldEnd); // Recycle 'itUCopyEnd' to mean something else. + eastl::fill(itPosition, itUCopyEnd, valueSaved); + } + else // Else the newly inserted items are going within the newly allocated area at the back. + { + eastl::uninitialized_fill_move(mItEnd, itPosition + (difference_type)n, valueSaved, itPosition, mItEnd); // This can throw. + eastl::fill(itPosition, itOldEnd, valueSaved); + } + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + + return iterator(mItBegin + nInsertionIndex); + } + } + } + + + template + inline void deque::DoSwap(this_type& x) + { + eastl::swap(mpPtrArray, x.mpPtrArray); + eastl::swap(mnPtrArraySize, x.mnPtrArraySize); + eastl::swap(mItBegin, x.mItBegin); + eastl::swap(mItEnd, x.mItEnd); + eastl::swap(mAllocator, x.mAllocator); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0. + + } + + + template + inline bool deque::validate() const + { + // To do: More detailed validation. 
+ // To do: Try to make the validation resistant to crashes if the data is invalid. + if((end() - begin()) < 0) + return false; + return true; + } + + + template + inline int deque::validate_iterator(const_iterator i) const + { + // To do: We don't currently track isf_current, will need to make it do so. + // To do: Fix the validation below, as it will not catch all invalid iterators. + if((i - begin()) < 0) + return isf_none; + + if((end() - i) < 0) + return isf_none; + + if(i == end()) + return (isf_valid | isf_current); + + return (isf_valid | isf_current | isf_can_dereference); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const deque& a, const deque& b) + { + return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); + } + +#if defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + template + inline synth_three_way_result operator<=>(const deque& a, const deque& b) + { + return eastl::lexicographical_compare_three_way(a.begin(), a.end(), b.begin(), b.end(), synth_three_way{}); + } + +#else + template + inline bool operator!=(const deque& a, const deque& b) + { + return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin())); + } + + template + inline bool operator<(const deque& a, const deque& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + inline bool operator>(const deque& a, const deque& b) + { + return b < a; + } + + template + inline bool operator<=(const deque& a, const deque& b) + { + return !(b < a); + } + + template + inline bool operator>=(const deque& a, const deque& b) + { + return !(a < b); + } +#endif + + template + inline void swap(deque& a, deque& b) + { + a.swap(b); + } + + /////////////////////////////////////////////////////////////////////// + // erase / erase_if + // + // https://en.cppreference.com/w/cpp/container/deque/erase2 + /////////////////////////////////////////////////////////////////////// + template + typename deque::size_type erase(deque& c, const U& value) + { + // Erases all elements that compare equal to value from the container. + auto origEnd = c.end(); + auto newEnd = eastl::remove(c.begin(), origEnd, value); + auto numRemoved = eastl::distance(newEnd, origEnd); + c.erase(newEnd, origEnd); + + // Note: This is technically a lossy conversion when size_type + // is 32bits and ptrdiff_t is 64bits (could happen on 64bit + // systems when EASTL_SIZE_T_32BIT is set). In practice this + // is fine because if EASTL_SIZE_T_32BIT is set then the deque + // should not have more elements than fit in a uint32_t and so + // the distance here should fit in a size_type. + return static_cast::size_type>(numRemoved); + } + + template + typename deque::size_type erase_if(deque& c, Predicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. + auto origEnd = c.end(); + auto newEnd = eastl::remove_if(c.begin(), origEnd, predicate); + auto numRemoved = eastl::distance(newEnd, origEnd); + c.erase(newEnd, origEnd); + + // Note: This is technically a lossy conversion when size_type + // is 32bits and ptrdiff_t is 64bits (could happen on 64bit + // systems when EASTL_SIZE_T_32BIT is set). 
In practice this + // is fine because if EASTL_SIZE_T_32BIT is set then the deque + // should not have more elements than fit in a uint32_t and so + // the distance here should fit in a size_type. + return static_cast::size_type>(numRemoved); + } + + + /////////////////////////////////////////////////////////////////////// + // erase_unsorted + // + // This serves a similar purpose as erase above but with the difference + // that it doesn't preserve the relative order of what is left in the + // deque. + // + // Effects: Removes all elements equal to value from the deque while + // optimizing for speed with the potential reordering of elements as a + // side effect. + // + // Complexity: Linear + // + /////////////////////////////////////////////////////////////////////// + template + typename deque::size_type erase_unsorted(deque& c, const U& value) + { + auto itRemove = c.begin(); + auto ritMove = c.rbegin(); + + while(true) + { + itRemove = eastl::find(itRemove, ritMove.base(), value); + if (itRemove == ritMove.base()) // any elements to remove? + break; + + ritMove = eastl::find_if(ritMove, eastl::make_reverse_iterator(itRemove), [&value](const T& elem) { return elem != value; }); + if (itRemove == ritMove.base()) // any elements that can be moved into place? + break; + + *itRemove = eastl::move(*ritMove); + ++itRemove; + ++ritMove; + } + + // now all elements in the range [itRemove, c.end()) are either to be removed or have already been moved from. + + auto origEnd = end(c); + auto numRemoved = distance(itRemove, origEnd); + c.erase(itRemove, origEnd); + + // Note: This is technically a lossy conversion when size_type + // is 32bits and ptrdiff_t is 64bits (could happen on 64bit + // systems when EASTL_SIZE_T_32BIT is set). In practice this + // is fine because if EASTL_SIZE_T_32BIT is set then the deque + // should not have more elements than fit in a uint32_t and so + // the distance here should fit in a size_type. + return static_cast::size_type>(numRemoved); + } + + /////////////////////////////////////////////////////////////////////// + // erase_unsorted_if + // + // This serves a similar purpose as erase_if above but with the + // difference that it doesn't preserve the relative order of what is + // left in the deque. + // + // Effects: Removes all elements that return true for the predicate + // while optimizing for speed with the potential reordering of elements + // as a side effect. + // + // Complexity: Linear + // + /////////////////////////////////////////////////////////////////////// + template + typename deque::size_type erase_unsorted_if(deque& c, Predicate predicate) + { + // Erases all elements that satisfy predicate from the container. + auto itRemove = c.begin(); + auto ritMove = c.rbegin(); + + while(true) + { + itRemove = eastl::find_if(itRemove, ritMove.base(), predicate); + if (itRemove == ritMove.base()) // any elements to remove? + break; + + ritMove = eastl::find_if(ritMove, eastl::make_reverse_iterator(itRemove), not_fn(predicate)); + if (itRemove == ritMove.base()) // any elements that can be moved into place? + break; + + *itRemove = eastl::move(*ritMove); + ++itRemove; + ++ritMove; + } + + // now all elements in the range [itRemove, c.end()) are either to be removed or have already been moved from. 
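+ // Illustrative trace (not part of the original source), with c == {1, 2, 3, 4}
+ // and an is-even predicate: the loop moves 3 into 2's slot, giving {1, 3, 3, 4}
+ // with itRemove at index 2; the erase below then drops the tail {3, 4}, leaving
+ // {1, 3} and returning 2.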
+
+		auto origEnd = end(c);
+		auto numRemoved = distance(itRemove, origEnd);
+		c.erase(itRemove, origEnd);
+
+		// Note: This is technically a lossy conversion when size_type
+		// is 32bits and ptrdiff_t is 64bits (could happen on 64bit
+		// systems when EASTL_SIZE_T_32BIT is set). In practice this
+		// is fine because if EASTL_SIZE_T_32BIT is set then the deque
+		// should not have more elements than fit in a uint32_t and so
+		// the distance here should fit in a size_type.
+		return static_cast<typename deque<T, Allocator, kDequeSubarraySize>::size_type>(numRemoved);
+	}
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+#if EASTL_EXCEPTIONS_ENABLED
+	EA_RESTORE_VC_WARNING();
+#endif
+
+
+#endif // Header include guard
diff --git a/external/EASTL/include/EASTL/finally.h b/external/EASTL/include/EASTL/finally.h
new file mode 100644
index 00000000..b4ed5803
--- /dev/null
+++ b/external/EASTL/include/EASTL/finally.h
@@ -0,0 +1,93 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// eastl::finally is an implementation of the popular C++ scope-guard idiom,
+// built on RAII - Resource Acquisition Is Initialization. eastl::finally
+// guarantees that the user-provided callable will be executed upon whatever
+// mechanism is used to leave the current scope. This can guard against user
+// errors, and it is a popular technique for writing robust code in execution
+// environments that have exceptions enabled.
+//
+// Example:
+//     void foo()
+//     {
+//         void* p = malloc(128);
+//         auto _ = eastl::make_finally([&] { free(p); });
+//
+//         // Code that may throw an exception...
+//
+//     } // eastl::finally guaranteed to call 'free' at scope exit.
+//
+// References:
+// * https://www.bfilipek.com/2017/04/finalact.html
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FINALLY_H
+#define EASTL_FINALLY_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+	///////////////////////////////////////////////////////////////////////////
+	// finally
+	//
+	// finally is the type that calls the user's callback on scope exit.
+	//
+	template <typename Functor>
+	class finally
+	{
+		static_assert(!eastl::is_lvalue_reference_v<Functor>, "eastl::finally requires the callable is passed as an rvalue reference.");
+
+		Functor m_functor;
+		bool m_engaged = false;
+
+	public:
+		finally(Functor f) : m_functor(eastl::move(f)), m_engaged(true) {}
+
+		finally(finally&& other) : m_functor(eastl::move(other.m_functor)), m_engaged(other.m_engaged)
+		{
+			other.dismiss();
+		}
+
+		~finally() { execute(); }
+
+		finally(const finally&) = delete;
+		finally& operator=(const finally&) = delete;
+		finally& operator=(finally&&) = delete;
+
+		inline void dismiss() { m_engaged = false; }
+
+		inline void execute()
+		{
+			if (m_engaged)
+				m_functor();
+
+			dismiss();
+		}
+	};
+
+
+	///////////////////////////////////////////////////////////////////////////
+	// make_finally
+	//
+	// This utility function is the standard mechanism to perform the required
+	// type deduction on the user-provided callback in order to create a
+	// 'finally' object.
+	//
+	template <typename F>
+	auto make_finally(F&& f)
+	{
+		return finally<F>(eastl::forward<F>(f));
+	}
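+
+	///////////////////////////////////////////////////////////////////////////
+	// Example usage (editorial sketch, not part of the original EASTL header;
+	// the file handle and cleanup body are illustrative only):
+	//
+	//     FILE* file = fopen("settings.ini", "rb");
+	//     auto closer = eastl::make_finally([&] { if(file) fclose(file); });
+	//
+	//     // ... early returns and thrown exceptions are all safe here;
+	//     // 'closer' runs the lambda exactly once on scope exit ...
+	//
+	//     closer.dismiss(); // optional: cancel the callback if cleanup is no longer wanted
+	///////////////////////////////////////////////////////////////////////////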
+}
+
+#endif // EASTL_FINALLY_H
diff --git a/external/EASTL/include/EASTL/fixed_allocator.h b/external/EASTL/include/EASTL/fixed_allocator.h
new file mode 100644
index 00000000..488eae4a
--- /dev/null
+++ b/external/EASTL/include/EASTL/fixed_allocator.h
@@ -0,0 +1,455 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+//     fixed_allocator
+//     fixed_allocator_with_overflow
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_ALLOCATOR_H
+#define EASTL_FIXED_ALLOCATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/fixed_pool.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+
+#include <new>
+
+EA_RESTORE_ALL_VC_WARNINGS();
+
+EA_DISABLE_VC_WARNING(4275); // non dll-interface class used as base for DLL-interface classkey 'identifier'
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+	///////////////////////////////////////////////////////////////////////////
+	// fixed_allocator
+	///////////////////////////////////////////////////////////////////////////
+
+	/// fixed_allocator
+	///
+	/// Implements an allocator which allocates a single fixed size where
+	/// the size, alignment, and memory used for the pool is defined at
+	/// runtime by the user. This is different from fixed containers
+	/// such as fixed_list whereby the size and alignment are determined
+	/// at compile time and the memory is directly built into the container's
+	/// member data.
+	///
+	/// If the pool's memory is exhausted or was never initialized, the
+	/// allocate function returns NULL. Consider the fixed_allocator_with_overflow
+	/// class as an alternative in order to deal with this situation.
+	///
+	/// This class requires the user to call container.get_allocator().init()
+	/// after constructing the container. There currently isn't a way to
+	/// construct the container with the initialization parameters, though
+	/// with some effort such a thing could probably be made possible.
+	/// It's not as simple as it might first seem, due to the non-copyable
+	/// nature of fixed allocators. A side effect of this limitation is that
+	/// you cannot copy-construct a container using fixed_allocators.
+	///
+	/// Another side-effect is that you cannot swap two containers using
+	/// a fixed_allocator, as a swap requires temporary memory allocated by
+	/// an equivalent allocator, and such a thing cannot be done implicitly.
+	/// A workaround for the swap limitation is that you can implement your
+	/// own swap whereby you provide an explicitly created temporary object.
+	///
+	/// Note: Be careful to set the allocator's node size to the size of the
+	/// container node and not the size of the contained object. Note that the
+	/// example code below uses IntListNode.
+ /// + /// Example usage: + /// typedef eastl::list IntList; + /// typedef IntList::node_type IntListNode; + /// + /// IntListNode buffer[200]; + /// IntList intList; + /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode)); + /// + class EASTL_API fixed_allocator : public fixed_pool_base + { + public: + /// fixed_allocator + /// + /// Default constructor. The user usually will need to call init() after + /// constructing via this constructor. + /// + fixed_allocator(const char* /*pName*/ = EASTL_FIXED_POOL_DEFAULT_NAME) + : fixed_pool_base(NULL) + { + } + + + /// fixed_allocator + /// + /// Copy constructor. The user usually will need to call init() after + /// constructing via this constructor. By their nature, fixed-allocators + /// cannot be copied in any useful way, as by their nature the user + /// must manually initialize them. + /// + fixed_allocator(const fixed_allocator&) + : fixed_pool_base(NULL) + { + } + + + /// operator= + /// + /// By their nature, fixed-allocators cannot be copied in any + /// useful way, as by their nature the user must manually + /// initialize them. + /// + fixed_allocator& operator=(const fixed_allocator&) + { + return *this; + } + + + // init + // + // No init here, as the base class version is sufficient. + // + //void init(void* pMemory, size_t memorySize, size_t nodeSize, + // size_t alignment, size_t alignmentOffset = 0); + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate(size_t n, int /*flags*/ = 0) + { + // To consider: Verify that 'n' is what the user initialized us with. + + Link* pLink = mpHead; + + if(pLink) // If we have space... + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + mpHead = pLink->mpNext; + return pLink; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + pLink = mpNext; + + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + n); + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + return pLink; + } + + // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error. + return NULL; + } + } + + + /// allocate + /// + void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0) + { + return allocate(n, flags); + } + + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. + /// + void deallocate(void* p, size_t) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return EASTL_FIXED_POOL_DEFAULT_NAME; + } + + + void set_name(const char*) + { + // Nothing to do. We don't allocate memory. 
+ } + + }; // fixed_allocator + + bool operator==(const fixed_allocator& a, const fixed_allocator& b); + bool operator!=(const fixed_allocator& a, const fixed_allocator& b); + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_allocator_with_overflow + /////////////////////////////////////////////////////////////////////////// + + /// fixed_allocator_with_overflow + /// + /// Implements an allocator which allocates a single fixed size where + /// the size, alignment, and memory used for the pool is defined at + /// runtime by the user. This is different from fixed containers + /// such as fixed_list whereby the size and alignment are determined + /// at compile time and the memory is directly built into the container's + /// member data. + /// + /// Note: Be careful to set the allocator's node size to the size of the + /// container node and not the size of the contained object. Note that the + /// example code below uses IntListNode. + /// + /// This class requires the user to call container.get_allocator().init() + /// after constructing the container. There currently isn't a way to + /// construct the container with the initialization parameters, though + /// with some effort such a thing could probably be made possible. + /// It's not as simple as it might first seem, due to the non-copyable + /// nature of fixed allocators. A side effect of this limitation is that + /// you cannot copy-construct a container using fixed_allocators. + /// + /// Another side-effect is that you cannot swap two containers using + /// a fixed_allocator, as a swap requires temporary memory allocated by + /// an equivalent allocator, and such a thing cannot be done implicitly. + /// A workaround for the swap limitation is that you can implement your + /// own swap whereby you provide an explicitly created temporary object. + /// + /// Example usage: + /// typedef eastl::list IntList; + /// typedef IntList::node_type IntListNode; + /// + /// IntListNode buffer[200]; + /// IntList intList; + /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode)); + /// + class EASTL_API fixed_allocator_with_overflow : public fixed_pool_base + { + public: + /// fixed_allocator_with_overflow + /// + /// Default constructor. The user usually will need to call init() after + /// constructing via this constructor. + /// + fixed_allocator_with_overflow(const char* pName = EASTL_FIXED_POOL_DEFAULT_NAME) + : fixed_pool_base(NULL) + , mOverflowAllocator(pName) + , mpPoolBegin(nullptr) + , mpPoolEnd(nullptr) + , mnNodeSize(0) + { + } + + + /// fixed_allocator_with_overflow + /// + /// Copy constructor. The user usually will need to call init() after + /// constructing via this constructor. By their nature, fixed-allocators + /// cannot be copied in any useful way, as by their nature the user + /// must manually initialize them. + /// + fixed_allocator_with_overflow(const fixed_allocator_with_overflow&) + : fixed_pool_base(NULL) + , mpPoolBegin(nullptr) + , mpPoolEnd(nullptr) + , mnNodeSize(0) + { + } + + + /// operator= + /// + /// By their nature, fixed-allocators cannot be copied in any + /// useful way, as by their nature the user must manually + /// initialize them. 
+ /// + fixed_allocator_with_overflow& operator=(const fixed_allocator_with_overflow& x) + { + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + + /// init + /// + void init(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + mpPoolEnd = (void*)((uintptr_t)pMemory + memorySize); + mnNodeSize = (eastl_size_t)nodeSize; + } + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate(size_t /*n*/, int /*flags*/ = 0) + { + // To consider: Verify that 'n' is what the user initialized us with. + + void* p; + + if(mpHead) // If we have space... + { + p = mpHead; + mpHead = mpHead->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if (mpNext != mpCapacity) + { + p = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + } + else + p = mOverflowAllocator.allocate(mnNodeSize); + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + + /// allocate + /// + void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0) + { + return allocate(n, flags); + } + + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. + /// + void deallocate(void* p, size_t) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + if((p >= mpPoolBegin) && (p < mpPoolEnd)) + { + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + else + mOverflowAllocator.deallocate(p, (size_t)mnNodeSize); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + protected: + EASTLAllocatorType mOverflowAllocator; // To consider: Allow the user to define the type of this, presumably via a template parameter. + void* mpPoolBegin; // To consider: We have these member variables and ideally we shouldn't need them. The problem is that + void* mpPoolEnd; // the information about the pool buffer and object size is stored in the owning container + eastl_size_t mnNodeSize; // and we can't have access to it without increasing the amount of code we need and by templating + // more code. It may turn out that simply storing data here is smaller in the end. + }; // fixed_allocator_with_overflow // Granted, this class is usually used for debugging purposes, but perhaps there is an elegant solution. 
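+	// Example usage (editorial sketch, not part of the original EASTL header;
+	// the buffer size and list type are illustrative only):
+	//
+	//     typedef eastl::list<int, fixed_allocator_with_overflow> IntList;
+	//     typedef IntList::node_type                              IntListNode;
+	//
+	//     IntListNode buffer[64];
+	//     IntList     intList;
+	//     intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode));
+	//
+	//     // The first 64 nodes come from 'buffer'; further nodes fall back to
+	//     // the overflow (heap) allocator instead of failing with NULL.
+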
+ + bool operator==(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b); + bool operator!=(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b); + + + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + inline bool operator==(const fixed_allocator&, const fixed_allocator&) + { + return false; + } + + inline bool operator!=(const fixed_allocator&, const fixed_allocator&) + { + return false; + } + + inline bool operator==(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&) + { + return false; + } + + inline bool operator!=(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&) + { + return false; + } + + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/fixed_function.h b/external/EASTL/include/EASTL/fixed_function.h new file mode 100644 index 00000000..6aed768a --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_function.h @@ -0,0 +1,218 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXED_FUNCTION_H +#define EASTL_FIXED_FUNCTION_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include + +namespace eastl +{ + template + class fixed_function; + + namespace internal + { + template + struct is_fixed_function + : public eastl::false_type {}; + + template + struct is_fixed_function> + : public eastl::true_type {}; + + template + EA_CONSTEXPR bool is_fixed_function_v = is_fixed_function::value; + } + + #define EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(TYPE) \ + static_assert(sizeof(TYPE) <= sizeof(typename Base::FunctorStorageType), \ + "fixed_function local buffer is not large enough to hold the callable object.") + + #define EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES) \ + static_assert(SIZE_IN_BYTES >= NEW_SIZE_IN_BYTES, \ + "fixed_function local buffer is not large enough to hold the new fixed_function type.") + + template + using EASTL_DISABLE_OVERLOAD_IF_FIXED_FUNCTION = + eastl::disable_if_t>>; + + + // fixed_function + // + template + class fixed_function : public internal::function_detail + { + using Base = internal::function_detail; + + public: + using typename Base::result_type; + + fixed_function() EA_NOEXCEPT = default; + fixed_function(std::nullptr_t p) EA_NOEXCEPT + : Base(p) + { + } + + fixed_function(const fixed_function& other) + : Base(other) + { + } + + fixed_function(fixed_function&& other) + : Base(eastl::move(other)) + { + } + + template > + fixed_function(Functor functor) + : Base(eastl::move(functor)) + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(Functor); + } + + template + fixed_function(const fixed_function& other) + : Base(other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + } + + template + fixed_function(fixed_function&& other) + : Base(eastl::move(other)) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + } + + ~fixed_function() EA_NOEXCEPT = default; + + fixed_function& operator=(const fixed_function& other) + { + Base::operator=(other); + return *this; + } + + fixed_function& operator=(fixed_function&& other) + { + Base::operator=(eastl::move(other)); + 
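+			// (Editorial note, not part of the original EASTL source: the base
+			// function_detail stores the callable in an in-place buffer of
+			// SIZE_IN_BYTES, as the static_asserts above enforce, so this assignment
+			// moves the wrapped callable between local buffers without heap allocation.)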
return *this; + } + + fixed_function& operator=(std::nullptr_t p) EA_NOEXCEPT + { + Base::operator=(p); + return *this; + } + + template + fixed_function& operator=(const fixed_function& other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + + Base::operator=(other); + return *this; + } + + template + fixed_function& operator=(fixed_function&& other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + + Base::operator=(eastl::move(other)); + return *this; + } + + template > + fixed_function& operator=(Functor&& functor) + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::decay_t); + Base::operator=(eastl::forward(functor)); + return *this; + } + + template + fixed_function& operator=(eastl::reference_wrapper f) EA_NOEXCEPT + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::reference_wrapper); + Base::operator=(f); + return *this; + } + + void swap(fixed_function& other) EA_NOEXCEPT + { + Base::swap(other); + } + + explicit operator bool() const EA_NOEXCEPT + { + return Base::operator bool(); + } + + R operator ()(Args... args) const + { + return Base::operator ()(eastl::forward(args)...); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + return Base::target_type(); + } + + template + Functor* target() EA_NOEXCEPT + { + return Base::target(); + } + + template + const Functor* target() const EA_NOEXCEPT + { + return Base::target(); + } + #endif + }; + + template + bool operator==(const fixed_function& f, std::nullptr_t) EA_NOEXCEPT + { + return !f; + } + + template + bool operator==(std::nullptr_t, const fixed_function& f) EA_NOEXCEPT + { + return !f; + } + + template + bool operator!=(const fixed_function& f, std::nullptr_t) EA_NOEXCEPT + { + return !!f; + } + + template + bool operator!=(std::nullptr_t, const fixed_function& f) EA_NOEXCEPT + { + return !!f; + } + + template + void swap(fixed_function& lhs, fixed_function& rhs) + { + lhs.swap(rhs); + } + +} // namespace eastl + +#endif // EASTL_FIXED_FUNCTION_H diff --git a/external/EASTL/include/EASTL/fixed_hash_map.h b/external/EASTL/include/EASTL/fixed_hash_map.h new file mode 100644 index 00000000..c8087c18 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_hash_map.h @@ -0,0 +1,830 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hash_map and hash_multimap which use a fixed size +// memory pool for its buckets and nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_HASH_MAP_H +#define EASTL_FIXED_HASH_MAP_H + + +#include +#include + +EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +namespace eastl +{ + /// EASTL_FIXED_HASH_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. 
+ /// + #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_NAME + #define EASTL_FIXED_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_map" // Unless the user overrides something, this is "EASTL fixed_hash_map". + #endif + + #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME + #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multimap" // Unless the user overrides something, this is "EASTL fixed_hash_multimap". + #endif + + + /// EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR + /// EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MAP_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// fixed_hash_map + /// + /// Implements a hash_map with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Key The key type for the map. This is a map of Key to T (value). + /// T The value type for the map. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. + /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_map : public hash_map::node_type), + nodeCount, + EASTL_ALIGN_OF(eastl::pair), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_map base_type; + typedef fixed_hash_map this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::clear; + + protected: + using base_type::mAllocator; + + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
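+		// (Editorial note, not part of the original EASTL source: all bucket
+		// pointers and node storage live in the two member arrays above, so a
+		// fixed_hash_map performs no heap allocation unless bEnableOverflow is
+		// true and more than nodeCount elements are inserted.)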
+ + public: + explicit fixed_hash_map(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_map(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_map(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_map(const this_type& x); + fixed_hash_map(this_type&& x); + fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + void clear(bool clearBuckets); + }; // fixed_hash_map + + + + + + /// fixed_hash_multimap + /// + /// Implements a hash_multimap with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Key The key type for the map. This is a map of Key to T (value). + /// T The value type for the map. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. + /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_multimap : public hash_multimap::node_type), + nodeCount, + EASTL_ALIGN_OF(eastl::pair), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_multimap base_type; + typedef fixed_hash_multimap this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::clear; + + protected: + using base_type::mAllocator; + + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + public: + explicit fixed_hash_multimap(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_multimap(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_multimap(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multimap(const this_type& x); + fixed_hash_multimap(this_type&& x); + fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + void clear(bool clearBuckets); + }; // fixed_hash_multimap + + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_map + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_map:: + fixed_hash_map(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if (!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if (!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
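+			// (Editorial note, not part of the original EASTL source: with overflow
+			// disabled the table must never rehash, because rehashing would allocate
+			// a new bucket array. An absurdly high max load factor (10000 elements
+			// per bucket) effectively disables the rehash policy.)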
+ } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_map:: + fixed_hash_map(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_map:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_map:: + reset_lose_memory() + { + base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount); + base_type::mnElementCount = 0; + base_type::mRehashPolicy.mnNextResize = 0; + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_map::size_type + fixed_hash_map::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_map::overflow_allocator_type& + fixed_hash_map::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_map::overflow_allocator_type& + fixed_hash_map::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_map:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + template + inline void fixed_hash_map:: + clear(bool clearBuckets) + { + base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount); + if(clearBuckets) + { + base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount); + reset_lose_memory(); + } + base_type::mpBucketArray = (node_type**)mBucketBuffer; + base_type::mnElementCount = 0; + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_map& a, + fixed_hash_map& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_multimap + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if (!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_multimap:: + fixed_hash_multimap(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(),fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(),fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_multimap:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
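+		// (Editorial note, not part of the original EASTL source: fixed_swap
+		// exchanges the two containers through a temporary, since their elements
+		// live in their own member buffers and the containers cannot simply trade
+		// pointers the way heap-backed containers can.)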
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_multimap:: + reset_lose_memory() + { + base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount); + base_type::mnElementCount = 0; + base_type::mRehashPolicy.mnNextResize = 0; + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_multimap::size_type + fixed_hash_multimap::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_multimap::overflow_allocator_type& + fixed_hash_multimap::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_multimap::overflow_allocator_type& + fixed_hash_multimap::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_multimap::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + template + inline void fixed_hash_multimap:: + clear(bool clearBuckets) + { + base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount); + if(clearBuckets) + { + base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount); + reset_lose_memory(); + } + base_type::mpBucketArray = (node_type**)mBucketBuffer; + base_type::mnElementCount = 0; + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_multimap& a, + fixed_hash_multimap& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + +EA_RESTORE_VC_WARNING() + +#endif // Header include guard + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/fixed_hash_set.h b/external/EASTL/include/EASTL/fixed_hash_set.h new file mode 100644 index 00000000..1b259600 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_hash_set.h @@ -0,0 +1,790 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hash_set which uses a fixed size memory pool for +// its buckets and nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_HASH_SET_H +#define EASTL_FIXED_HASH_SET_H + + +#include +#include + +EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_HASH_SET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_HASH_SET_DEFAULT_NAME + #define EASTL_FIXED_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_set" // Unless the user overrides something, this is "EASTL fixed_hash_set". 
+ #endif + + #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME + #define EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multiset" // Unless the user overrides something, this is "EASTL fixed_hash_multiset". + #endif + + + /// EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR + /// EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_SET_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME) + #endif + + + + /// fixed_hash_set + /// + /// Implements a hash_set with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Value The type of object the hash_set holds. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. + /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_set : public hash_set::node_type), + nodeCount, + EASTL_ALIGN_OF(typename hash_set::node_type), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(typename hash_set::node_type), + 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef fixed_hash_set this_type; + typedef hash_set base_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + protected: + using base_type::mAllocator; + + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + public: + explicit fixed_hash_set(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_set(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_set(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_set(const this_type& x); + fixed_hash_set(this_type&& x); + fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator); + + fixed_hash_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
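+		// (Editorial note, not part of the original EASTL source: reset_lose_memory
+		// abandons the stored nodes without running destructors, so it is only
+		// appropriate when the elements are trivially destructible or have already
+		// been destroyed externally.)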
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_hash_set + + + + + + + /// fixed_hash_multiset + /// + /// Implements a hash_multiset with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Value The type of object the hash_set holds. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. + /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_multiset : public hash_multiset::node_type), + nodeCount, + EASTL_ALIGN_OF(typename hash_multiset::node_type), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(typename hash_multiset::node_type), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_multiset base_type; + typedef fixed_hash_multiset this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + protected: + using base_type::mAllocator; + + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + public: + explicit fixed_hash_multiset(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_multiset(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_multiset(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multiset(const this_type& x); + fixed_hash_multiset(this_type&& x); + fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_hash_multiset + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_set + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_set:: + fixed_hash_set(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + Hash(), Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if (!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if (!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_set:: + fixed_hash_set(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + { + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + } + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set::fixed_hash_set(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set::fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + x.hash_function(), x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + typename fixed_hash_set::this_type& + fixed_hash_set::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_set::this_type& + fixed_hash_set::operator=(this_type&& x) + { + operator=(x); + return *this; + } + + + template + inline typename fixed_hash_set::this_type& + fixed_hash_set::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_set:: + swap(this_type& x) + { + // We must do a brute-force swap, because fixed containers cannot share memory allocations. + // Note that we create a temp value on the stack. This approach may fail if the size of the + // container is too large. We have a rule against allocating memory from the heap, and so + // if the user wants to swap two large objects of this class, the user will currently need + // to implement it manually. To consider: add code to allocate a temporary buffer if the + // size of the container is too large for the stack. + EASTL_ASSERT(sizeof(x) < EASTL_MAX_STACK_USAGE); // It is dangerous to try to create objects that are too big for the stack. 
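// The three statements below implement the brute-force swap described above: the
// elements travel through a stack-resident temporary copy, making this an O(n)
// operation rather than the O(1) pointer exchange a heap-backed container could
// use; hence the EASTL_MAX_STACK_USAGE assertion on the object size.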
+ + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + + + template + void fixed_hash_set:: + reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_set::size_type + fixed_hash_set::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_set::overflow_allocator_type& + fixed_hash_set::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_set::overflow_allocator_type& + fixed_hash_set::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_set:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_set& a, + fixed_hash_set& b) + { + a.swap(b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_multiset + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + inline fixed_hash_multiset:: + fixed_hash_multiset(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + x.hash_function(), x.key_eq(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_multiset:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_multiset:: + reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_multiset::size_type + fixed_hash_multiset::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_multiset::overflow_allocator_type& + fixed_hash_multiset::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_multiset::overflow_allocator_type& + fixed_hash_multiset::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_multiset:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_multiset& a, + fixed_hash_multiset& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + +EA_RESTORE_VC_WARNING() + +#endif // Header include guard + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/fixed_list.h b/external/EASTL/include/EASTL/fixed_list.h new file mode 100644 index 00000000..e57c08bf --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_list.h @@ -0,0 +1,388 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a list which uses a fixed size memory pool for its nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_LIST_H +#define EASTL_FIXED_LIST_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_LIST_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. 
+ /// + #ifndef EASTL_FIXED_LIST_DEFAULT_NAME + #define EASTL_FIXED_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_list" // Unless the user overrides something, this is "EASTL fixed_list". + #endif + + + /// EASTL_FIXED_LIST_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_LIST_DEFAULT_ALLOCATOR + #define EASTL_FIXED_LIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_LIST_DEFAULT_NAME) + #endif + + + + /// fixed_list + /// + /// fixed_list is a list which uses a single block of contiguous memory + /// for its nodes. The purpose of this is to reduce memory usage relative + /// to a conventional memory allocation system (with block headers), to + /// increase allocation speed (often due to avoidance of mutex locks), + /// to increase performance (due to better memory locality), and to decrease + /// memory fragmentation due to the way that fixed block allocators work. + /// + /// The primary downside to a fixed_list is that the number of nodes it + /// can contain is fixed upon its declaration. If you want a fixed_list + /// that doesn't have this limitation, then you probably don't want a + /// fixed_list. You can always create your own memory allocator that works + /// the way you want. + /// + /// Template parameters: + /// T The type of object the list holds. + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_list : public list::node_type), + nodeCount, EASTL_ALIGN_OF(typename list::node_type), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(typename list::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef list base_type; + typedef fixed_list this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::iterator iterator; + + enum { kMaxSize = nodeCount }; + + using base_type::assign; + using base_type::resize; + using base_type::insert; + using base_type::size; + using base_type::get_allocator; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::internalAllocator; + + public: + fixed_list(); + explicit fixed_list(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_list(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. + fixed_list(size_type n, const value_type& value); + fixed_list(const this_type& x); + fixed_list(this_type&& x); + fixed_list(this_type&&, const overflow_allocator_type& overflowAllocator); + fixed_list(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_LIST_DEFAULT_ALLOCATOR); + + template + fixed_list(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
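// A minimal usage sketch (hypothetical sizes; assumes the T/nodeCount/bEnableOverflow
// parameter order documented in the class comment above):
//
//     #include <EASTL/fixed_list.h>
//
//     eastl::fixed_list<int, 32, false> intList; // room for 32 nodes, overflow disabled
//     intList.push_back(1);
//     intList.push_back(2);
//     // Pushing a 33rd element would fail to allocate; with the default
//     // bEnableOverflow == true it would come from the overflow allocator instead.
//
// Also note that reset_lose_memory() above runs no destructors, so it is only
// appropriate for element types with trivial destruction (or elements that have
// already been destroyed by other means).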
+ size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_list + + + + /////////////////////////////////////////////////////////////////////// + // fixed_list + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_list::fixed_list() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_list::fixed_list(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_list::fixed_list(size_type n) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + resize(n); + } + + + template + inline fixed_list::fixed_list(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + resize(n, value); + } + + + template + inline fixed_list::fixed_list(const this_type& x) + : base_type(fixed_allocator_type(mBuffer)) + { + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(this_type&& x) + : base_type(fixed_allocator_type(mBuffer)) + { + // Since we are a fixed_list, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + // See comments above. 
+ internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + assign(ilist.begin(), ilist.end()); + } + + + template + template + fixed_list::fixed_list(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + assign(first, last); + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(const this_type& x) + { + if(this != &x) + { + base_type::clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like list::operator=. + } + return *this; + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(this_type&& x) + { + return operator=(x); + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_list::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_list::reset_lose_memory() + { + base_type::reset_lose_memory(); + get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_list::size_type + fixed_list::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_list::full() const + { + // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return + // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap. + // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. + return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method. + } + + + template + inline bool fixed_list::has_overflowed() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)... 
+ return (internalAllocator().mPool.mnPeakSize > kMaxSize); + #else + return (size() > kMaxSize); + #endif + } + + + template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator> + inline bool fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const + { + return bEnableOverflow; + } + + + template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator> + inline const typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type& + fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator> + inline typename fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type& + fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator> + inline void + fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator) + { + internalAllocator().set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator> + inline void swap(fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>& a, + fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/fixed_map.h b/external/EASTL/include/EASTL/fixed_map.h new file mode 100644 index 00000000..c01db08f --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_map.h @@ -0,0 +1,580 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a map and multimap which use a fixed size memory +// pool for their nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_MAP_H +#define EASTL_FIXED_MAP_H + + +#include <EASTL/map.h> +#include <EASTL/fixed_set.h> // Included because fixed_rbtree_base resides here. + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_MAP_DEFAULT_NAME + #define EASTL_FIXED_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_map" // Unless the user overrides something, this is "EASTL fixed_map". + #endif + + #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_NAME + #define EASTL_FIXED_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multimap" // Unless the user overrides something, this is "EASTL fixed_multimap". + #endif + + + /// EASTL_FIXED_MAP_DEFAULT_ALLOCATOR + /// EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_MAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MAP_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// fixed_map + /// + /// Implements a map with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The key object (key in the key/value pair).
+ /// T The mapped object (value in the key/value pair). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_map : public map::node_type), + nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef fixed_map this_type; + typedef map base_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + + public: + fixed_map(); + explicit fixed_map(const overflow_allocator_type& overflowAllocator); + explicit fixed_map(const Compare& compare); + fixed_map(const this_type& x); + fixed_map(this_type&& x); + fixed_map(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MAP_DEFAULT_ALLOCATOR); + + template + fixed_map(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_map + + + + + /// fixed_multimap + /// + /// Implements a multimap with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The key object (key in the key/value pair). + /// T The mapped object (value in the key/value pair). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. 
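// A minimal usage sketch for fixed_map and fixed_multimap (hypothetical sizes;
// assumes the Key/T/nodeCount parameter order documented above):
//
//     #include <EASTL/fixed_map.h>
//
//     eastl::fixed_map<int, const char*, 16> m; // 16 nodes held inline
//     m[1] = "one";                              // operator[] inserts, as with eastl::map
//     m.insert(eastl::make_pair(2, "two"));
//
//     eastl::fixed_multimap<int, const char*, 16> mm; // multimap: duplicate keys allowed
//     mm.insert(eastl::make_pair(1, "uno"));
//     mm.insert(eastl::make_pair(1, "eins"));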
+ /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_multimap : public multimap::node_type), + nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef multimap base_type; + typedef fixed_multimap this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + using base_type::get_compare; + + public: + fixed_multimap(); + fixed_multimap(const overflow_allocator_type& overflowAllocator); + explicit fixed_multimap(const Compare& compare); + fixed_multimap(const this_type& x); + fixed_multimap(this_type&& x); + fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR); + + template + fixed_multimap(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_multimap + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_map + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_map::fixed_map() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_map::fixed_map(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_map::fixed_map(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_map::fixed_map(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_map::fixed_map(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_map::fixed_map(this_type&& x, const overflow_allocator_type& 
overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_map::fixed_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_map::fixed_map(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_map::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_map::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_map::size_type + fixed_map::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_map::overflow_allocator_type& + fixed_map::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_map::overflow_allocator_type& + fixed_map::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void + fixed_map::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_map& a, + fixed_map& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_multimap + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_multimap::fixed_multimap() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multimap::fixed_multimap(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multimap::fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_multimap::fixed_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_multimap:: + fixed_multimap(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_multimap::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
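// fixed_swap (from fixed_pool.h) is the "special swap" these comments refer to:
// because the node buffers are embedded in the objects themselves, it swaps by way
// of a temporary container, which (as I read the EASTL sources) is placed on the
// stack when the container is small enough and heap-allocated for excessively
// large fixed buffers.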
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_multimap::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_multimap::size_type + fixed_multimap::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_multimap::overflow_allocator_type& + fixed_multimap::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_multimap::overflow_allocator_type& + fixed_multimap::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void + fixed_multimap::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_multimap& a, + fixed_multimap& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + diff --git a/external/EASTL/include/EASTL/fixed_set.h b/external/EASTL/include/EASTL/fixed_set.h new file mode 100644 index 00000000..e5f00236 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_set.h @@ -0,0 +1,578 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a set and multiset which use a fixed size memory +// pool for their nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SET_H +#define EASTL_FIXED_SET_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_SET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_SET_DEFAULT_NAME + #define EASTL_FIXED_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_set" // Unless the user overrides something, this is "EASTL fixed_set". + #endif + + #ifndef EASTL_FIXED_MULTISET_DEFAULT_NAME + #define EASTL_FIXED_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multiset" // Unless the user overrides something, this is "EASTL fixed_multiset". + #endif + + + /// EASTL_FIXED_SET_DEFAULT_ALLOCATOR + /// EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_SET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SET_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTISET_DEFAULT_NAME) + #endif + + + + /// fixed_set + /// + /// Implements a set with a fixed block of memory identified by the + /// nodeCount template parameter. 
+ /// + /// Template parameters: + /// Key The type of object the set holds (a.k.a. value). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_set : public set::node_type), + nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef set base_type; + typedef fixed_set this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + using base_type::get_compare; + + public: + fixed_set(); + fixed_set(const overflow_allocator_type& overflowAllocator); + explicit fixed_set(const Compare& compare); + fixed_set(const this_type& x); + fixed_set(this_type&& x); + fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SET_DEFAULT_ALLOCATOR); + + template + fixed_set(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_set + + + + + + + /// fixed_multiset + /// + /// Implements a multiset with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The type of object the set holds (a.k.a. value). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. 
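// A minimal usage sketch for fixed_set and fixed_multiset (hypothetical sizes;
// assumes the Key/nodeCount parameter order documented above):
//
//     #include <EASTL/fixed_set.h>
//
//     eastl::fixed_set<int, 8> s;   // 8 nodes held inline, overflow enabled by default
//     s.insert(3);
//     s.insert(3);                  // duplicate: a set ignores it, so s.size() == 1
//     // fixed_multiset has the same shape but would keep both 3s.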
+ /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_multiset : public multiset::node_type), + nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef multiset base_type; + typedef fixed_multiset this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + + public: + fixed_multiset(); + fixed_multiset(const overflow_allocator_type& overflowAllocator); + explicit fixed_multiset(const Compare& compare); + fixed_multiset(const this_type& x); + fixed_multiset(this_type&& x); + fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR); + + template + fixed_multiset(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_multiset + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_set + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_set::fixed_set() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_set::fixed_set(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_set::fixed_set(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_set::fixed_set(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_set::fixed_set(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_set::fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), 
fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_set::fixed_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_set::fixed_set(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_set::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_set::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_set::size_type + fixed_set::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_set::overflow_allocator_type& + fixed_set::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_set::overflow_allocator_type& + fixed_set::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_set::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_set& a, + fixed_set& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + + /////////////////////////////////////////////////////////////////////// + // fixed_multiset + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_multiset::fixed_multiset() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multiset::fixed_multiset(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multiset::fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_multiset::fixed_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_multiset::fixed_multiset(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_multiset::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_multiset::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_multiset::size_type + fixed_multiset::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_multiset::overflow_allocator_type& + fixed_multiset::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_multiset::overflow_allocator_type& + fixed_multiset::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_multiset::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_multiset& a, + fixed_multiset& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + diff --git a/external/EASTL/include/EASTL/fixed_slist.h b/external/EASTL/include/EASTL/fixed_slist.h new file mode 100644 index 00000000..abad7ad9 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_slist.h @@ -0,0 +1,389 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements an slist which uses a fixed size memory pool for its nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SLIST_H +#define EASTL_FIXED_SLIST_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_SLIST_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_SLIST_DEFAULT_NAME + #define EASTL_FIXED_SLIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_slist" // Unless the user overrides something, this is "EASTL fixed_slist". + #endif + + + /// EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR + #define EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SLIST_DEFAULT_NAME) + #endif + + + + /// fixed_slist + /// + /// fixed_slist is an slist which uses a single block of contiguous memory + /// for its nodes. The purpose of this is to reduce memory usage relative + /// to a conventional memory allocation system (with block headers), to + /// increase allocation speed (often due to avoidance of mutex locks), + /// to increase performance (due to better memory locality), and to decrease + /// memory fragmentation due to the way that fixed block allocators work. 
+ /// + /// The primary downside to a fixed_slist is that the number of nodes it + /// can contain is fixed upon its declaration. If you want a fixed_slist + /// that doesn't have this limitation, then you probably don't want a + /// fixed_slist. You can always create your own memory allocator that works + /// the way you want. + /// + /// Template parameters: + /// T The type of object the slist holds. + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_slist : public slist::node_type), + nodeCount, EASTL_ALIGN_OF(typename slist::node_type), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(typename slist::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef slist base_type; + typedef fixed_slist this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + + enum { kMaxSize = nodeCount }; + + using base_type::assign; + using base_type::resize; + using base_type::size; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::internalAllocator; + + public: + fixed_slist(); + explicit fixed_slist(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_slist(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. + fixed_slist(size_type n, const value_type& value); + fixed_slist(const this_type& x); + fixed_slist(this_type&& x); + fixed_slist(this_type&&, const overflow_allocator_type&); + fixed_slist(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR); + + template + fixed_slist(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. 
+ + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_slist + + + + + /////////////////////////////////////////////////////////////////////// + // slist + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_slist::fixed_slist() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_slist::fixed_slist(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_slist::fixed_slist(size_type n) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + resize(n); + } + + + template + inline fixed_slist::fixed_slist(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + resize(n, value); + } + + + template + inline fixed_slist::fixed_slist(const this_type& x) + : base_type(fixed_allocator_type(mBuffer)) + { + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_slist::fixed_slist(this_type&& x) + : base_type(fixed_allocator_type(mBuffer)) + { + // Since we are a fixed_list, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + template + inline fixed_slist::fixed_slist(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + // See comments above. 
+ internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_slist::fixed_slist(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + assign(ilist.begin(), ilist.end()); + } + + + template + template + fixed_slist::fixed_slist(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + assign(first, last); + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(const this_type& x) + { + if(this != &x) + { + base_type::clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like slist::operator=. + } + return *this; + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(this_type&& x) + { + return operator=(x); + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_slist::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_slist::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_slist::size_type + fixed_slist::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_slist::full() const + { + // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return + // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap. + // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer. + return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method. + } + + + template + inline bool fixed_slist::has_overflowed() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)... 
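+			// mPool.mnPeakSize tracks the high-water mark of node allocations, which avoids
+			// walking the list here; slist::size() is O(n).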
+			return (internalAllocator().mPool.mnPeakSize > kMaxSize);
+		#else
+			return (size() > kMaxSize);
+		#endif
+	}
+
+
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+	inline bool fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::can_overflow() const
+	{
+		return bEnableOverflow;
+	}
+
+
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+	inline const typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+	fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() const EA_NOEXCEPT
+	{
+		return internalAllocator().get_overflow_allocator();
+	}
+
+
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+	inline typename fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::overflow_allocator_type&
+	fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::get_overflow_allocator() EA_NOEXCEPT
+	{
+		return internalAllocator().get_overflow_allocator();
+	}
+
+
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+	inline void
+	fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+	{
+		internalAllocator().set_overflow_allocator(allocator);
+	}
+
+
+
+	///////////////////////////////////////////////////////////////////////
+	// global operators
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+	inline void swap(fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>& a,
+					 fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator>& b)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(a, b);
+	}
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/fixed_string.h b/external/EASTL/include/EASTL/fixed_string.h
new file mode 100644
index 00000000..68e5eea5
--- /dev/null
+++ b/external/EASTL/include/EASTL/fixed_string.h
@@ -0,0 +1,824 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a string which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_STRING_H
+#define EASTL_FIXED_STRING_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/string.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+namespace eastl
+{
+	/// EASTL_FIXED_STRING_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	/// In the case of fixed-size containers, the allocator name always refers
+	/// to overflow allocations.
+	///
+	#ifndef EASTL_FIXED_STRING_DEFAULT_NAME
+		#define EASTL_FIXED_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_string" // Unless the user overrides something, this is "EASTL fixed_string".
+	#endif
+
+
+
+	/// fixed_string
+	///
+	/// A fixed_string with bEnableOverflow == true is identical to a regular
+	/// string in terms of its behavior. All the expectations of regular string
+	/// apply to it and no additional expectations come from it. When bEnableOverflow
+	/// is false, fixed_string behaves like regular string with the exception that
+	/// its capacity can never increase. All operations you do on such a fixed_string
+	/// which require a capacity increase will result in undefined behavior or a
+	/// C++ allocation exception, depending on the configuration of EASTL.
+	///
+	/// Note: The nodeCount value is the amount of characters to allocate, which needs to take into account a terminating zero.
Thus if you want to store strings with a strlen + /// of 30, the nodeCount value must be at least 31. + /// + /// Template parameters: + /// T The type of object the string holds (char, wchar_t, char8_t, char16_t, char32_t). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + /// Notes: + /// The nodeCount value must be at least 2, one for a character and one for a terminating 0. + /// + /// As of this writing, the string class necessarily reallocates when an insert of + /// self is done into self. As a result, the fixed_string class doesn't support + /// inserting self into self unless the bEnableOverflow template parameter is true. + /// + /// Example usage: + /// fixed_string fixedString("hello world"); // Can hold up to a strlen of 128. + /// + /// fixedString = "hola mundo"; + /// fixedString.clear(); + /// fixedString.resize(200); + /// fixedString.sprintf("%f", 1.5f); + /// + template + class fixed_string : public basic_string > + { + public: + typedef fixed_vector_allocator fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef basic_string base_type; + typedef fixed_string this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::CtorDoNotInitialize CtorDoNotInitialize; + typedef typename base_type::CtorSprintf CtorSprintf; + typedef aligned_buffer aligned_buffer_type; + + enum { kMaxSize = nodeCount - 1 }; // -1 because we need to save one element for the silent terminating null. + + using base_type::npos; + using base_type::append; + using base_type::resize; + using base_type::clear; + using base_type::capacity; + using base_type::size; + using base_type::sprintf_va_list; + using base_type::get_allocator; + + protected: + using base_type::mPair; + using base_type::DoAllocate; + using base_type::DoFree; + using base_type::internalLayout; + + union // We define a union in order to avoid strict pointer aliasing issues with compilers like GCC. + { + value_type mArray[1]; + aligned_buffer_type mBuffer; // Question: Why are we doing this aligned_buffer thing? Why not just do an array of value_type, given that we are using just strings of char types. + }; + + public: + fixed_string(); + explicit fixed_string(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + fixed_string(const base_type& x, size_type position, size_type n = base_type::npos); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. 
+ fixed_string(const value_type* p, size_type n); + fixed_string(const value_type* p); + fixed_string(size_type n, const value_type& value); + fixed_string(const this_type& x); + fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator); + fixed_string(const base_type& x); + fixed_string(const value_type* pBegin, const value_type* pEnd); + fixed_string(CtorDoNotInitialize, size_type n); + fixed_string(CtorSprintf, const value_type* pFormat, ...); + fixed_string(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator); + fixed_string(this_type&& x); + fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator); + + this_type& operator=(const this_type& x); + this_type& operator=(const base_type& x); + this_type& operator=(const value_type* p); + this_type& operator=(const value_type c); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void set_capacity(size_type n); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + // The inherited versions of substr/left/right call the basic_string constructor, + // which will call the overflow allocator and fail if bEnableOverflow == false + this_type substr(size_type position, size_type n) const; + this_type left(size_type n) const; + this_type right(size_type n) const; + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_string + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_string + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_string::fixed_string() + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + } + + + template + inline fixed_string::fixed_string(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + } + + + template + inline fixed_string::fixed_string(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + 
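+		// Point the string at the empty local buffer first; append(x) below copies the
+		// contents and falls back to the overflow allocator (when enabled) only if x does
+		// not fit in the nodeCount - 1 local characters.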
internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const base_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const base_type& x, size_type position, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x, position, n); + } + + + template + inline fixed_string::fixed_string(const value_type* p, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(p, n); + } + + + template + inline fixed_string::fixed_string(const value_type* p) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(p); // There better be enough space to hold the assigned string. + } + + + template + inline fixed_string::fixed_string(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(n, value); // There better be enough space to hold the assigned string. 
+ } + + + template + inline fixed_string::fixed_string(const value_type* pBegin, const value_type* pEnd) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(pBegin, pEnd); + } + + + template + inline fixed_string::fixed_string(CtorDoNotInitialize, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + + if(n < nodeCount) + { + internalLayout().SetHeapSize(n); + *internalLayout().HeapEndPtr() = 0; + } + else + { + internalLayout().SetHeapSize(0); + *internalLayout().HeapEndPtr() = 0; + + resize(n); + } + } + + + template + inline fixed_string::fixed_string(CtorSprintf, const value_type* pFormat, ...) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + *internalLayout().HeapBeginPtr() = 0; + + va_list arguments; + va_start(arguments, pFormat); + sprintf_va_list(pFormat, arguments); + va_end(arguments); + } + + + template + inline fixed_string::fixed_string(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(ilist.begin(), ilist.end()); + } + + + template + inline fixed_string::fixed_string(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); // Let x destruct its own items. + } + + template + inline fixed_string::fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); // Let x destruct its own items. 
+ } + + + template + inline typename fixed_string::this_type& + fixed_string::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const base_type& x) + { + if(static_cast(this) != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const value_type* p) + { + if(internalLayout().HeapBeginPtr() != p) + { + clear(); + append(p); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const value_type c) + { + clear(); + append((size_type)1, c); + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(std::initializer_list ilist) + { + clear(); + append(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(this_type&& x) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. + + // if(static_cast(this) != &x) This should be impossible, so we disable it until proven otherwise. + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); // Let x destruct its own items. + } + return *this; + } + + + template + inline void fixed_string::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_string::set_capacity(size_type n) + { + const size_type nPrevSize = internalLayout().GetSize(); + const size_type nPrevCapacity = capacity(); + + if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)... + n = nPrevSize; + + if(n != nPrevCapacity) // If the request results in a capacity change... + { + const size_type allocSize = (n + 1); // +1 because the terminating 0 isn't included in the supplied capacity value. So now n refers the amount of memory we need. + + if(can_overflow() && (((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer) || (allocSize > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer... + { + T* const pNewData = (allocSize <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(allocSize); + T* const pCopyEnd = (n < nPrevSize) ? (internalLayout().HeapBeginPtr() + n) : internalLayout().HeapEndPtr(); + CharStringUninitializedCopy(internalLayout().HeapBeginPtr(), pCopyEnd, pNewData); // Copy [internalLayout().heap.mpBegin, pCopyEnd) to pNewData. + if((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer) + DoFree(internalLayout().HeapBeginPtr(), internalLayout().GetHeapCapacity() + 1); + + internalLayout().SetHeapSize((size_type)(pCopyEnd - internalLayout().HeapBeginPtr())); + internalLayout().SetHeapBeginPtr(pNewData); + internalLayout().SetHeapCapacity(allocSize - 1); + } // Else the new capacity would be within our fixed buffer. + else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity. 
+ resize(n); + } + } + + + template + inline void fixed_string::reset_lose_memory() + { + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapSize(0); + internalLayout().SetHeapCapacity(nodeCount - 1); + } + + + template + inline typename fixed_string:: + size_type fixed_string::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_string::full() const + { + // If size >= capacity, then we are definitely full. + // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full. + return ((size_t)(internalLayout().HeapEndPtr() - internalLayout().HeapBeginPtr()) >= kMaxSize) || ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_string::has_overflowed() const + { + // This will be incorrect for the case that bOverflowEnabled is true and the container was resized + // down to a small size where the fixed buffer could take over ownership of the data again. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. + return ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_string::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline typename fixed_string:: + this_type fixed_string::substr(size_type position, size_type n) const + { + #if EASTL_STRING_OPT_RANGE_ERRORS + if(position > internalLayout().GetSize()) + base_type::ThrowRangeException(); + #endif + + return fixed_string(internalLayout().HeapBeginPtr() + position, + internalLayout().HeapBeginPtr() + position + eastl::min_alt(n, internalLayout().GetSize() - position)); + } + + + template + inline typename fixed_string:: + this_type fixed_string::left(size_type n) const + { + const size_type nLength = size(); + if(n < nLength) + return fixed_string(internalLayout().HeapBeginPtr(), internalLayout().HeapBeginPtr() + n); + return *this; + } + + + template + inline typename fixed_string:: + this_type fixed_string::right(size_type n) const + { + const size_type nLength = size(); + if(n < nLength) + return fixed_string(internalLayout().HeapEndPtr() - n, internalLayout().HeapEndPtr()); + return *this; + } + + + template + inline const typename fixed_string:: + overflow_allocator_type& fixed_string::get_overflow_allocator() const EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline typename fixed_string:: + overflow_allocator_type& fixed_string::get_overflow_allocator() EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline void + fixed_string::set_overflow_allocator(const overflow_allocator_type& allocator) + { + get_allocator().set_overflow_allocator(allocator); + } + + template + inline size_t hash_string(const T* p) + { + unsigned int c, result = 2166136261U; + while ((c = *p++) != 0) // To consider: limit p to at most 256 chars. 
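+			// FNV-1 step: multiply the running hash by the 32-bit FNV prime (16777619) and
+			// XOR in the next character; 2166136261 above is the FNV-1 offset basis.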
+ result = (result * 16777619) ^ c; + return (size_t)result; + } + + template + struct hash> + { + size_t operator()(const fixed_string& x) const + { + return hash_string(x.c_str()); + } + }; + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + + // Operator + + template + fixed_string operator+(const fixed_string& a, + const fixed_string& b) + { + // We have a problem here because need to return an fixed_string by value. This will typically result in it + // using stack space equal to its size. That size may be too large to be workable. + typedef fixed_string this_type; + + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.append(b); + return result; + } + + + template + fixed_string operator+(const typename fixed_string::value_type* p, + const fixed_string& b) + { + typedef fixed_string this_type; + + const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p); + this_type result(const_cast(b).get_overflow_allocator()); + result.append(p, p + n); + result.append(b); + return result; + } + + + template + fixed_string operator+(typename fixed_string::value_type c, + const fixed_string& b) + { + typedef fixed_string this_type; + + this_type result(const_cast(b).get_overflow_allocator()); + result.push_back(c); + result.append(b); + return result; + } + + + template + fixed_string operator+(const fixed_string& a, + const typename fixed_string::value_type* p) + { + typedef fixed_string this_type; + + const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p); + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.append(p, p + n); + return result; + } + + + template + fixed_string operator+(const fixed_string& a, + typename fixed_string::value_type c) + { + typedef fixed_string this_type; + + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.push_back(c); + return result; + } + + + template + fixed_string operator+(fixed_string&& a, + fixed_string&& b) + { + a.append(b); // Using an rvalue by name results in it becoming an lvalue. + return eastl::move(a); + } + + template + fixed_string operator+(fixed_string&& a, + const fixed_string& b) + { + a.append(b); + return eastl::move(a); + } + + template + fixed_string operator+(const typename fixed_string::value_type* p, + fixed_string&& b) + { + b.insert(0, p); + return eastl::move(b); + } + + template + fixed_string operator+(fixed_string&& a, + const typename fixed_string::value_type* p) + { + a.append(p); + return eastl::move(a); + } + + template + fixed_string operator+(fixed_string&& a, + typename fixed_string::value_type c) + { + a.push_back(c); + return eastl::move(a); + } + + + // operator ==, !=, <, >, <=, >= come from the string implementations. + + template + inline void swap(fixed_string& a, + fixed_string& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/fixed_substring.h b/external/EASTL/include/EASTL/fixed_substring.h new file mode 100644 index 00000000..5ff51368 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_substring.h @@ -0,0 +1,275 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SUBSTRING_H +#define EASTL_FIXED_SUBSTRING_H + + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// fixed_substring + /// + /// Implements a string which is a reference to a segment of characters. + /// This class is efficient because it allocates no memory and copies no + /// memory during construction and assignment, but rather refers directly + /// to the segment of chracters. A common use of this is to have a + /// fixed_substring efficiently refer to a substring within another string. + /// + /// You cannot directly resize a fixed_substring (e.g. via resize, insert, + /// append, erase), but you can assign a different substring to it. + /// You can modify the characters within a substring in place. + /// As of this writing, in the name of being lean and simple it is the + /// user's responsibility to not call unsupported resizing functions + /// such as those listed above. A detailed listing of the functions which + /// are not supported is given below in the class declaration. + /// + /// The c_str function doesn't act as one might hope, as it simply + /// returns the pointer to the beginning of the string segment and the + /// 0-terminator may be beyond the end of the segment. If you want to + /// always be able to use c_str as expected, use the fixed string solution + /// we describe below. + /// + /// Another use of fixed_substring is to provide C++ string-like functionality + /// with a C character array. This allows you to work on a C character array + /// as if it were a C++ string as opposed using the C string API. Thus you + /// can do this: + /// + /// void DoSomethingForUser(char* timeStr, size_t timeStrCapacity) + /// { + /// fixed_substring tmp(timeStr, timeStrCapacity); + /// tmp = "hello "; + /// tmp += "world"; + /// } + /// + /// Note that this class constructs and assigns from const string pointers + /// and const string objects, yet this class does not declare its member + /// data as const. This is a concession in order to allow this implementation + /// to be simple and lean. It is the user's responsibility to make sure + /// that strings that should not or can not be modified are either not + /// used by fixed_substring or are not modified by fixed_substring. + /// + /// A more flexible alternative to fixed_substring is fixed_string. + /// fixed_string has none of the functional limitations that fixed_substring + /// has and like fixed_substring it doesn't allocate memory. However, + /// fixed_string makes a *copy* of the source string and uses local + /// memory to store that copy. Also, fixed_string objects on the stack + /// are going to have a limit as to their maximum size. + /// + /// Notes: + /// As of this writing, the string class necessarily reallocates when + /// an insert of self is done into self. As a result, the fixed_substring + /// class doesn't support inserting self into self. 
+ /// + /// Example usage: + /// basic_string str("hello world"); + /// fixed_substring sub(str, 2, 5); // sub == "llo w" + /// + template + class fixed_substring : public basic_string + { + public: + typedef basic_string base_type; + typedef fixed_substring this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + + using base_type::npos; + using base_type::get_allocator; + + private: + using base_type::mPair; + using base_type::AllocateSelf; + using base_type::internalLayout; + + void SetInternalHeapLayout(value_type* pBeginPtr, size_type nSize, size_type nCap) + { + internalLayout().SetHeapBeginPtr(pBeginPtr); + internalLayout().SetHeapSize(nSize); + internalLayout().SetHeapCapacity(nCap); + } + + + public: + fixed_substring() + : base_type() + { + } + + fixed_substring(const fixed_substring& x) + : fixed_substring(static_cast(x)) + {} + + fixed_substring(const base_type& x) + : base_type() + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + assign(x); + } + + // We gain no benefit from having an rvalue move constructor or assignment operator, + // as this class is a const class. + + fixed_substring(const base_type& x, size_type position, size_type n = base_type::npos) + : base_type() + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + assign(x, position, n); + } + + fixed_substring(const value_type* p, size_type n) + : base_type() + { + assign(p, n); + } + + fixed_substring(const value_type* p) + : base_type() + { + assign(p); + } + + fixed_substring(const value_type* pBegin, const value_type* pEnd) + : base_type() + { + assign(pBegin, pEnd); + } + + ~fixed_substring() + { + // We need to reset, as otherwise the parent destructor will + // attempt to free our memory. + AllocateSelf(); + } + + this_type& operator=(const this_type& x) + { + assign(x); + return *this; + } + + this_type& operator=(const base_type& x) + { + assign(x); + return *this; + } + + this_type& operator=(const value_type* p) + { + assign(p); + return *this; + } + + this_type& assign(const base_type& x) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(x.data()), x.size(), x.size()); + return *this; + } + + this_type& assign(const base_type& x, size_type position, size_type n) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(x.data()) + position, n, n); + return *this; + } + + this_type& assign(const value_type* p, size_type n) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(p), n, n); + return *this; + } + + this_type& assign(const value_type* p) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(p), (size_type)CharStrlen(p), (size_type)CharStrlen(p)); + return *this; + } + + this_type& assign(const value_type* pBegin, const value_type* pEnd) + { + // By design, we need to cast away const-ness here. 
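+			// (Writing through the substring afterwards is only valid when the referenced
+			// characters are themselves writable; see the class comments above on const-ness.)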
+ SetInternalHeapLayout(const_cast(pBegin), (size_type)(pEnd - pBegin), (size_type)(pEnd - pBegin)); + return *this; + } + + + // Partially supported functionality + // + // When using fixed_substring on a character sequence that is within another + // string, the following functions may do one of two things: + // 1 Attempt to reallocate + // 2 Write a 0 char at the end of the fixed_substring + // + // Item #1 will result in a crash, due to the attempt by the underlying + // string class to free the substring memory. Item #2 will result in a 0 + // char being written to the character array. Item #2 may or may not be + // a problem, depending on how you use fixed_substring. Thus the following + // functions cannot be used safely. + + #if 0 // !defined(EA_COMPILER_NO_DELETED_FUNCTIONS) We may want to enable these deletions after some investigation of possible user impact. + this_type& operator=(value_type c) = delete; + void resize(size_type n, value_type c) = delete; + void resize(size_type n) = delete; + void reserve(size_type = 0) = delete; + void set_capacity(size_type n) = delete; + void clear() = delete; + this_type& operator+=(const base_type& x) = delete; + this_type& operator+=(const value_type* p) = delete; + this_type& operator+=(value_type c) = delete; + this_type& append(const base_type& x) = delete; + this_type& append(const base_type& x, size_type position, size_type n) = delete; + this_type& append(const value_type* p, size_type n) = delete; + this_type& append(const value_type* p) = delete; + this_type& append(size_type n) = delete; + this_type& append(size_type n, value_type c) = delete; + this_type& append(const value_type* pBegin, const value_type* pEnd) = delete; + this_type& append_sprintf_va_list(const value_type* pFormat, va_list arguments) = delete; + this_type& append_sprintf(const value_type* pFormat, ...) = delete; + void push_back(value_type c) = delete; + void pop_back() = delete; + this_type& assign(size_type n, value_type c) = delete; + this_type& insert(size_type position, const base_type& x) = delete; + this_type& insert(size_type position, const base_type& x, size_type beg, size_type n) = delete; + this_type& insert(size_type position, const value_type* p, size_type n) = delete; + this_type& insert(size_type position, const value_type* p) = delete; + this_type& insert(size_type position, size_type n, value_type c) = delete; + iterator insert(const_iterator p, value_type c) = delete; + void insert(const_iterator p, size_type n, value_type c) = delete; + void insert(const_iterator p, const value_type* pBegin, const value_type* pEnd) = delete; + this_type& erase(size_type position = 0, size_type n = npos) = delete; + iterator erase(const_iterator p) = delete; + iterator erase(const_iterator pBegin, const_iterator pEnd) = delete; + void swap(base_type& x) = delete; + this_type& sprintf_va_list(const value_type* pFormat, va_list arguments) = delete; + this_type& sprintf(const value_type* pFormat, ...) = delete; + #endif + + }; // fixed_substring + + +} // namespace eastl + + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/fixed_vector.h b/external/EASTL/include/EASTL/fixed_vector.h new file mode 100644 index 00000000..b588d9f1 --- /dev/null +++ b/external/EASTL/include/EASTL/fixed_vector.h @@ -0,0 +1,618 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a vector which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_VECTOR_H
+#define EASTL_FIXED_VECTOR_H
+
+
+#include <EASTL/vector.h>
+#include <EASTL/internal/fixed_pool.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+	/// EASTL_FIXED_VECTOR_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	/// In the case of fixed-size containers, the allocator name always refers
+	/// to overflow allocations.
+	///
+	#ifndef EASTL_FIXED_VECTOR_DEFAULT_NAME
+		#define EASTL_FIXED_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_vector" // Unless the user overrides something, this is "EASTL fixed_vector".
+	#endif
+
+
+	/// EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR
+	///
+	#ifndef EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR
+		#define EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_VECTOR_DEFAULT_NAME)
+	#endif
+
+
+	/// fixed_vector
+	///
+	/// A fixed_vector with bEnableOverflow == true is identical to a regular
+	/// vector in terms of its behavior. All the expectations of regular vector
+	/// apply to it and no additional expectations come from it. When bEnableOverflow
+	/// is false, fixed_vector behaves like regular vector with the exception that
+	/// its capacity can never increase. All operations you do on such a fixed_vector
+	/// which require a capacity increase will result in undefined behavior or a
+	/// C++ allocation exception, depending on the configuration of EASTL.
+	///
+	/// Template parameters:
+	///     T                      The type of object the vector holds.
+	///     nodeCount              The max number of objects to contain.
+	///     bEnableOverflow        Whether or not we should use the overflow heap if our object pool is exhausted.
+	///     OverflowAllocator      Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+	///
+	/// Note: The nodeCount value must be at least 1.
+ /// + /// Example usage: + /// fixed_vector fixedVector); + /// + /// fixedVector.push_back(Widget()); + /// fixedVector.resize(200); + /// fixedVector.clear(); + /// + template ::type> + class fixed_vector : public vector > + { + public: + typedef fixed_vector_allocator fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef vector base_type; + typedef fixed_vector this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::reference reference; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef aligned_buffer aligned_buffer_type; + + enum { kMaxSize = nodeCount }; + + using base_type::get_allocator; + using base_type::resize; + using base_type::clear; + using base_type::size; + using base_type::assign; + using base_type::npos; + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + static_assert(!is_const::value, "fixed_vector value_type must be non-const."); + static_assert(!is_volatile::value, "fixed_vector value_type must be non-volatile."); +#endif + + protected: + aligned_buffer_type mBuffer; + + using base_type::mpBegin; + using base_type::mpEnd; + using base_type::internalCapacityPtr; + using base_type::DoAllocate; + using base_type::DoFree; + using base_type::DoAssign; + + public: + fixed_vector(); + explicit fixed_vector(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_vector(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. + fixed_vector(size_type n, const value_type& value); + fixed_vector(const this_type& x); + fixed_vector(this_type&& x); + fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_vector(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR); + + template + fixed_vector(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void set_capacity(size_type n); + void clear(bool freeOverflow); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + void* push_back_uninitialized(); + void push_back(const value_type& value); // We implement push_back here because we have a specialization that's + reference push_back(); // smaller for the case of overflow being disabled. 
+ void push_back(value_type&& value); + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + protected: + void* DoPushBackUninitialized(true_type); + void* DoPushBackUninitialized(false_type); + + void DoPushBack(true_type, const value_type& value); + void DoPushBack(false_type, const value_type& value); + + void DoPushBackMove(true_type, value_type&& value); + void DoPushBackMove(false_type, value_type&& value); + + reference DoPushBack(false_type); + reference DoPushBack(true_type); + + }; // fixed_vector + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_vector + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_vector::fixed_vector() + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + template + inline fixed_vector::fixed_vector(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + template + inline fixed_vector::fixed_vector(size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + resize(n); + } + + + template + inline fixed_vector::fixed_vector(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + resize(n, value); + } + + + template + inline fixed_vector::fixed_vector(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign(x.begin(), x.end(), false_type()); + } + + + template + inline fixed_vector::fixed_vector(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + // Since we are a fixed_vector, we can't swap pointers. We can possibly do something like fixed_swap or + // we can just do an assignment from x. If we want to do the former then we need to have some complicated + // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in + // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case + // a simple assignment is no worse than the fancy pathway. + + // Since we are a fixed_vector, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. 
To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type()); + } + + + template + inline fixed_vector::fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + // Since we are not swapping the allocated buffers but simply move the elements, we do not have to care about allocator compatibility. + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type()); + } + + + template + inline fixed_vector::fixed_vector(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + typedef typename std::initializer_list::iterator InputIterator; + typedef typename eastl::iterator_traits::iterator_category IC; + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssignFromIterator(ilist.begin(), ilist.end(), IC()); + } + + + template + template + fixed_vector::fixed_vector(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign(first, last, is_integral()); + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::template DoAssign(x.begin(), x.end(), false_type()); // Shorter route. + } + return *this; + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(std::initializer_list ilist) + { + typedef typename std::initializer_list::iterator InputIterator; + typedef typename eastl::iterator_traits::iterator_category IC; + + clear(); + base_type::template DoAssignFromIterator(ilist.begin(), ilist.end(), IC()); + return *this; + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(this_type&& x) + { + // Since we are a fixed_vector, we can't swap pointers. We can possibly do something like fixed_swap or + // we can just do an assignment from x. If we want to do the former then we need to have some complicated + // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in + // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case + // a simple assignment is no worse than the fancy pathway. 
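+		// The elements themselves are moved one at a time via move_iterator below; x keeps
+		// its fixed buffer and is left holding valid but unspecified moved-from elements.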
+ if (this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::template DoAssign, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type()); // Shorter route. + } + return *this; + } + + + template + inline void fixed_vector::swap(this_type& x) + { + if((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator())) // If both containers are using the heap instead of local memory + { // then we can do a fast pointer swap instead of content swap. + eastl::swap(mpBegin, x.mpBegin); + eastl::swap(mpEnd, x.mpEnd); + eastl::swap(internalCapacityPtr(), x.internalCapacityPtr()); + } + else + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + } + + + template + inline void fixed_vector::set_capacity(size_type n) + { + const size_type nPrevSize = (size_type)(mpEnd - mpBegin); + const size_type nPrevCapacity = (size_type)(internalCapacityPtr() - mpBegin); + + if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)... + n = nPrevSize; + + if(n != nPrevCapacity) // If the request results in a capacity change... + { + if(can_overflow() && (((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer) || (n > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer... + { + T* const pNewData = (n <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(n); + T* const pCopyEnd = (n < nPrevSize) ? (mpBegin + n) : mpEnd; + eastl::uninitialized_move(mpBegin, pCopyEnd, pNewData); // Move [mpBegin, pCopyEnd) to p. + eastl::destruct(mpBegin, mpEnd); + if((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer) + DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin)); + + mpEnd = pNewData + (pCopyEnd - mpBegin); + mpBegin = pNewData; + internalCapacityPtr() = mpBegin + n; + } // Else the new capacity would be within our fixed buffer. + else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity. + resize(n); + } + } + + + template + inline void fixed_vector::clear(bool freeOverflow) + { + base_type::clear(); + if (freeOverflow && mpBegin != (value_type*)&mBuffer.buffer[0]) + { + EASTLFree(get_allocator(), mpBegin, (internalCapacityPtr() - mpBegin) * sizeof(T)); + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + } + + + template + inline void fixed_vector::reset_lose_memory() + { + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + + template + inline typename fixed_vector::size_type + fixed_vector::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_vector::full() const + { + // If size >= capacity, then we are definitely full. + // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full. 
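+            // Bookkeeping sketch, assuming overflow is enabled (kMaxSize is nodeCount):
+            //
+            //     eastl::fixed_vector<int, 2> v;
+            //     v.push_back(0); v.push_back(1);
+            //     v.full();           // true: size() has reached nodeCount
+            //     v.push_back(2);     // spills to the overflow allocator
+            //     v.has_overflowed(); // true until clear(true) returns v to mBuffer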
+ return ((size_t)(mpEnd - mpBegin) >= kMaxSize) || ((void*)mpBegin != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_vector::has_overflowed() const + { + // This will be incorrect for the case that bOverflowEnabled is true and the container was resized + // down to a small size where the fixed buffer could take over ownership of the data again. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. + return ((void*)mpBegin != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_vector::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline void* fixed_vector::push_back_uninitialized() + { + return DoPushBackUninitialized(typename conditional::type()); + } + + + template + inline void* fixed_vector::DoPushBackUninitialized(true_type) + { + return base_type::push_back_uninitialized(); + } + + + template + inline void* fixed_vector::DoPushBackUninitialized(false_type) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + return mpEnd++; + } + + + template + inline void fixed_vector::push_back(const value_type& value) + { + DoPushBack(typename conditional::type(), value); + } + + + template + inline void fixed_vector::DoPushBack(true_type, const value_type& value) + { + base_type::push_back(value); + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline void fixed_vector::DoPushBack(false_type, const value_type& value) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type(value); + } + + + template + inline typename fixed_vector::reference fixed_vector::push_back() + { + return DoPushBack(typename conditional::type()); + } + + + template + inline typename fixed_vector::reference fixed_vector::DoPushBack(true_type) + { + return base_type::push_back(); + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline typename fixed_vector::reference fixed_vector::DoPushBack(false_type) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type; // Note that this isn't value_type() as that syntax doesn't work on all compilers for POD types. + + return *(mpEnd - 1); // Same as return back(); + } + + + template + inline void fixed_vector::push_back(value_type&& value) + { + DoPushBackMove(typename conditional::type(), eastl::move(value)); + } + + + template + inline void fixed_vector::DoPushBackMove(true_type, value_type&& value) + { + base_type::push_back(eastl::move(value)); // This will call vector::push_back(value_type &&), and possibly swap value with *mpEnd. + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline void fixed_vector::DoPushBackMove(false_type, value_type&& value) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type(eastl::move(value)); // This will call the value_type(value_type&&) constructor, and possibly swap value with *mpEnd. 
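+            // The dispatch idiom used by the push_back family, in isolation; the
+            // names 'widget' and 'f' below are placeholders. Mapping the
+            // compile-time bool to true_type/false_type selects an overload, and
+            // the overload for the unused configuration is never instantiated:
+            //
+            //     template <bool B>
+            //     struct widget
+            //     {
+            //         void f()                     { do_f(typename eastl::conditional<B, eastl::true_type, eastl::false_type>::type()); }
+            //         void do_f(eastl::true_type)  { /* growable path */ }
+            //         void do_f(eastl::false_type) { /* fixed-capacity path */ }
+            //     };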
+ } + + + template + inline const typename fixed_vector::overflow_allocator_type& + fixed_vector::get_overflow_allocator() const EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline typename fixed_vector::overflow_allocator_type& + fixed_vector::get_overflow_allocator() EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline void + fixed_vector::set_overflow_allocator(const overflow_allocator_type& allocator) + { + get_allocator().set_overflow_allocator(allocator); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + // operator ==, !=, <, >, <=, >= come from the vector implementations. + + template + inline void swap(fixed_vector& a, + fixed_vector& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/functional.h b/external/EASTL/include/EASTL/functional.h new file mode 100644 index 00000000..89312db4 --- /dev/null +++ b/external/EASTL/include/EASTL/functional.h @@ -0,0 +1,1370 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FUNCTIONAL_H +#define EASTL_FUNCTIONAL_H + + +#include +#include +#include +#include +#include +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +// 4512/4626 - 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere. 
+EA_DISABLE_VC_WARNING(4512 4626); + + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////// + // Primary C++ functions + /////////////////////////////////////////////////////////////////////// + + template + struct plus + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a + b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/plus_void + template <> + struct plus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) + eastl::forward(b)) + { return eastl::forward(a) + eastl::forward(b); } + }; + + template + struct minus + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a - b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/minus_void + template <> + struct minus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) - eastl::forward(b)) + { return eastl::forward(a) - eastl::forward(b); } + }; + + template + struct multiplies + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a * b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/multiplies_void + template <> + struct multiplies + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) * eastl::forward(b)) + { return eastl::forward(a) * eastl::forward(b); } + }; + + template + struct divides + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a / b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/divides_void + template <> + struct divides + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) / eastl::forward(b)) + { return eastl::forward(a) / eastl::forward(b); } + }; + + template + struct modulus + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a % b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/modulus_void + template <> + struct modulus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) % eastl::forward(b)) + { return eastl::forward(a) % eastl::forward(b); } + }; + + template + struct negate + { + EA_CPP14_CONSTEXPR T operator()(const T& a) const + { return -a; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/negate_void + template <> + struct negate + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(T&& t) const + -> decltype(-eastl::forward(t)) + { return -eastl::forward(t); } + }; + + template + struct equal_to + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a == b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/equal_to_void + template <> + struct equal_to + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) == eastl::forward(b)) + { return eastl::forward(a) == eastl::forward(b); } + }; + + template + bool validate_equal_to(const T& a, const T& b, Compare compare) + { + return compare(a, b) == compare(b, a); + } + + template + struct not_equal_to + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a != b; } + }; + + // 
http://en.cppreference.com/w/cpp/utility/functional/not_equal_to_void + template <> + struct not_equal_to + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) != eastl::forward(b)) + { return eastl::forward(a) != eastl::forward(b); } + }; + + template + bool validate_not_equal_to(const T& a, const T& b, Compare compare) + { + return compare(a, b) == compare(b, a); // We want the not equal comparison results to be equal. + } + + /// str_equal_to + /// + /// Compares two 0-terminated string types. + /// The T types are expected to be iterators or act like iterators. + /// The expected behavior of str_less is the same as (strcmp(p1, p2) == 0). + /// + /// Example usage: + /// hash_set, str_equal_to > stringHashSet; + /// + /// Note: + /// You couldn't use str_equal_to like this: + /// bool result = equal("hi", "hi" + 2, "ho", str_equal_to()); + /// This is because equal tests an array of something, with each element by + /// the comparison function. But str_equal_to tests an array of something itself. + /// + /// To consider: Update this code to use existing word-based comparison optimizations, + /// such as that used in the EAStdC Strcmp function. + /// + template + struct str_equal_to + { + EA_CPP14_CONSTEXPR bool operator()(T a, T b) const + { + while(*a && (*a == *b)) + { + ++a; + ++b; + } + return (*a == *b); + } + }; + + template + struct greater + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a > b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/greater_void + template <> + struct greater + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) > eastl::forward(b)) + { return eastl::forward(a) > eastl::forward(b); } + }; + + template + bool validate_greater(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a > b), then !(b > a) + } + + + template + bool validate_less(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a < b), then !(b < a) + } + + /// str_less + /// + /// Compares two 0-terminated string types. + /// The T types are expected to be iterators or act like iterators, + /// and that includes being a pointer to a C character array. + /// The expected behavior of str_less is the same as (strcmp(p1, p2) < 0). + /// This function is not Unicode-correct and it's not guaranteed to work + /// with all Unicode strings. + /// + /// Example usage: + /// set > stringSet; + /// + /// To consider: Update this code to use existing word-based comparison optimizations, + /// such as that used in the EAStdC Strcmp function. 
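+    // Fleshed-out form of the str_equal_to example above, as a sketch (assumes
+    // the usual EASTL allocator setup):
+    //
+    //     eastl::hash_set<const char*, eastl::hash<const char*>,
+    //                     eastl::str_equal_to<const char*> > stringHashSet;
+    //     stringHashSet.insert("hello");
+    //     bool found = stringHashSet.find("hello") != stringHashSet.end();
+    //     // true: both hash and predicate compare characters, not pointer values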
+ /// + template + struct str_less + { + bool operator()(T a, T b) const + { + while(static_cast::type>::type>(*a) == + static_cast::type>::type>(*b)) + { + if(*a == 0) + return (*b != 0); + ++a; + ++b; + } + + char aValue = static_cast::type>(*a); + char bValue = static_cast::type>(*b); + + typename make_unsigned::type aValueU = static_cast::type>(aValue); + typename make_unsigned::type bValueU = static_cast::type>(bValue); + + return aValueU < bValueU; + + //return (static_cast::type>::type>(*a) < + // static_cast::type>::type>(*b)); + } + }; + + template + struct greater_equal + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a >= b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/greater_equal_void + template <> + struct greater_equal + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) >= eastl::forward(b)) + { return eastl::forward(a) >= eastl::forward(b); } + }; + + template + bool validate_greater_equal(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a >= b), then !(b >= a) + } + + template + struct less_equal + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a <= b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/less_equal_void + template <> + struct less_equal + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) <= eastl::forward(b)) + { return eastl::forward(a) <= eastl::forward(b); } + }; + + template + bool validate_less_equal(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a <= b), then !(b <= a) + } + + // todo: when C++20 support added, add a compare_three_way function object. 
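+    // The void ("transparent") specializations above deduce their argument types
+    // per call, so one functor instance works across types. Sketch:
+    //
+    //     #include <EASTL/sort.h>
+    //     eastl::vector<int> v = { 3, 1, 2 };
+    //     eastl::sort(v.begin(), v.end(), eastl::greater<>()); // 3, 2, 1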
+ + template + struct logical_and + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a && b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/logical_and_void + template <> + struct logical_and + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) && eastl::forward(b)) + { return eastl::forward(a) && eastl::forward(b); } + }; + + template + struct logical_or + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a || b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/logical_or_void + template <> + struct logical_or + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) || eastl::forward(b)) + { return eastl::forward(a) || eastl::forward(b); } + }; + + template + struct logical_not + { + EA_CPP14_CONSTEXPR bool operator()(const T& a) const + { return !a; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/logical_not_void + template <> + struct logical_not + { + template + EA_CPP14_CONSTEXPR auto operator()(T&& t) const + -> decltype(!eastl::forward(t)) + { return !eastl::forward(t); } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_and + template + struct bit_and + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { + return a & b; + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_and_void + template <> + struct bit_and + { + typedef int is_transparent; + + template + EA_CPP14_CONSTEXPR auto operator()(T&& a, U&& b) const + -> decltype(eastl::forward(a) & eastl::forward(b)) + { + return eastl::forward(a) & eastl::forward(b); + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_or + template + struct bit_or + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { + return a | b; + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_or_void + template <> + struct bit_or + { + typedef int is_transparent; + + template + EA_CPP14_CONSTEXPR auto operator()(T&& a, U&& b) const + -> decltype(eastl::forward(a) | eastl::forward(b)) + { + return eastl::forward(a) | eastl::forward(b); + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_xor + template + struct bit_xor + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { + return a ^ b; + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_xor_void + template <> + struct bit_xor + { + typedef int is_transparent; + + template + EA_CPP14_CONSTEXPR auto operator()(T&& a, U&& b) const + -> decltype(eastl::forward(a) ^ eastl::forward(b)) + { + return eastl::forward(a) ^ eastl::forward(b); + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_not + template + struct bit_not + { + EA_CPP14_CONSTEXPR T operator()(const T& a) const + { + return ~a; + } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/bit_not_void + template <> + struct bit_not + { + typedef int is_transparent; + + template + EA_CPP14_CONSTEXPR auto operator()(T&& t) const + -> decltype(~eastl::forward(t)) + { + return ~eastl::forward(t); + } + }; + + // todo: add identity function object after removal of non-standard identity (renamed to type_identity). + + + /////////////////////////////////////////////////////////////////////// + // Dual type functions + /////////////////////////////////////////////////////////////////////// + + + // deprecated. 
consider using the specialization equal_to<> instead. + template + struct EASTL_REMOVE_AT_2024_APRIL equal_to_2 + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a == b; } + + template , eastl::remove_const_t>>> + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const + { return b == a; } + }; + + template + struct EASTL_REMOVE_AT_2024_APRIL not_equal_to_2 + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a != b; } + + template , eastl::remove_const_t>>> + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const + { return b != a; } + }; + + + template + struct EASTL_REMOVE_AT_2024_APRIL less_2 + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a < b; } + + template , eastl::remove_const_t>>> + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const + { return b < a; } + }; + + EASTL_INTERNAL_DISABLE_DEPRECATED() // '*': was declared deprecated + + /// unary_negate + /// + template + class EASTL_REMOVE_AT_2024_APRIL unary_negate + { + protected: + Predicate mPredicate; + public: + explicit unary_negate(const Predicate& a) + : mPredicate(a) {} + EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::argument_type& a) const + { return !mPredicate(a); } + }; + + template + EASTL_REMOVE_AT_2024_APRIL inline EA_CPP14_CONSTEXPR unary_negate not1(const Predicate& predicate) + { return unary_negate(predicate); } + + + + /// binary_negate + /// + template + class EASTL_REMOVE_AT_2024_APRIL binary_negate + { + protected: + Predicate mPredicate; + public: + explicit binary_negate(const Predicate& a) + : mPredicate(a) { } + EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::first_argument_type& a, const typename Predicate::second_argument_type& b) const + { return !mPredicate(a, b); } + }; + + template + EASTL_REMOVE_AT_2024_APRIL inline EA_CPP14_CONSTEXPR binary_negate not2(const Predicate& predicate) + { return binary_negate(predicate); } + + + + /// unary_compose + /// + template + struct EASTL_REMOVE_AT_2024_APRIL unary_compose + { + protected: + Operation1 op1; + Operation2 op2; + + public: + unary_compose(const Operation1& x, const Operation2& y) + : op1(x), op2(y) {} + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const + { return op1(op2(x)); } + + typename Operation1::result_type operator()(typename Operation2::argument_type& x) const + { return op1(op2(x)); } + }; + + template + EASTL_REMOVE_AT_2024_APRIL inline unary_compose + compose1(const Operation1& op1, const Operation2& op2) + { + return unary_compose(op1,op2); + } + + /// binary_compose + /// + template + class EASTL_REMOVE_AT_2024_APRIL binary_compose + { + protected: + Operation1 op1; + Operation2 op2; + Operation3 op3; + + public: + // Support binary functors too. 
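+        // Sketch contrasting the deprecated negators with not_fn (defined later
+        // in this header); the old adaptors require argument_type typedefs:
+        //
+        //     struct IsEven
+        //     {
+        //         typedef int argument_type;
+        //         bool operator()(int x) const { return (x % 2) == 0; }
+        //     };
+        //
+        //     eastl::not1(IsEven())(3);   // true, deprecated path
+        //     eastl::not_fn(IsEven())(3); // true, preferred replacement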
+ typedef typename Operation2::argument_type first_argument_type; + typedef typename Operation3::argument_type second_argument_type; + + binary_compose(const Operation1& x, const Operation2& y, const Operation3& z) + : op1(x), op2(y), op3(z) { } + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const + { return op1(op2(x),op3(x)); } + + typename Operation1::result_type operator()(typename Operation2::argument_type& x) const + { return op1(op2(x),op3(x)); } + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x,const typename Operation3::argument_type& y) const + { return op1(op2(x),op3(y)); } + + typename Operation1::result_type operator()(typename Operation2::argument_type& x, typename Operation3::argument_type& y) const + { return op1(op2(x),op3(y)); } + }; + + + template + EASTL_REMOVE_AT_2024_APRIL inline binary_compose + compose2(const Operation1& op1, const Operation2& op2, const Operation3& op3) + { + return binary_compose(op1, op2, op3); + } + + + + /////////////////////////////////////////////////////////////////////// + // pointer_to_unary_function + /////////////////////////////////////////////////////////////////////// + + /// pointer_to_unary_function + /// + /// This is an adapter template which converts a pointer to a standalone + /// function to a function object. This allows standalone functions to + /// work in many cases where the system requires a function object. + /// + /// Example usage: + /// ptrdiff_t Rand(ptrdiff_t n) { return rand() % n; } // Note: The C rand function is poor and slow. + /// pointer_to_unary_function randInstance(Rand); + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + template + class EASTL_REMOVE_AT_2024_APRIL pointer_to_unary_function + : public unary_function + { + protected: + Result (*mpFunction)(Arg); + + public: + pointer_to_unary_function() + { } + + explicit pointer_to_unary_function(Result (*pFunction)(Arg)) + : mpFunction(pFunction) { } + + Result operator()(Arg x) const + { return mpFunction(x); } + }; + + + /// ptr_fun + /// + /// This ptr_fun is simply shorthand for usage of pointer_to_unary_function. + /// + /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway): + /// int factorial(int x) { return (x > 1) ? (x * factorial(x - 1)) : x; } + /// transform(pIntArrayBegin, pIntArrayEnd, pIntArrayBegin, ptr_fun(factorial)); + /// + template + EASTL_REMOVE_AT_2024_APRIL inline pointer_to_unary_function + ptr_fun(Result (*pFunction)(Arg)) + { return pointer_to_unary_function(pFunction); } + + + + + + /////////////////////////////////////////////////////////////////////// + // pointer_to_binary_function + /////////////////////////////////////////////////////////////////////// + + /// pointer_to_binary_function + /// + /// This is an adapter template which converts a pointer to a standalone + /// function to a function object. This allows standalone functions to + /// work in many cases where the system requires a function object. + /// + template + class EASTL_REMOVE_AT_2024_APRIL pointer_to_binary_function + : public binary_function + { + protected: + Result (*mpFunction)(Arg1, Arg2); + + public: + pointer_to_binary_function() + { } + + explicit pointer_to_binary_function(Result (*pFunction)(Arg1, Arg2)) + : mpFunction(pFunction) {} + + Result operator()(Arg1 x, Arg2 y) const + { return mpFunction(x, y); } + }; + + + /// This ptr_fun is simply shorthand for usage of pointer_to_binary_function. 
+ /// + /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway): + /// int multiply(int x, int y) { return x * y; } + /// transform(pIntArray1Begin, pIntArray1End, pIntArray2Begin, pIntArray1Begin, ptr_fun(multiply)); + /// + template + EASTL_REMOVE_AT_2024_APRIL inline pointer_to_binary_function + ptr_fun(Result (*pFunction)(Arg1, Arg2)) + { return pointer_to_binary_function(pFunction); } + + + + + + + /////////////////////////////////////////////////////////////////////// + // mem_fun + // mem_fun1 + // + // Note that mem_fun calls member functions via *pointers* to classes + // and not instances of classes. mem_fun_ref is for calling functions + // via instances of classes or references to classes. + // + // NOTE: + // mem_fun was deprecated in C++11 and removed in C++17, in favor + // of the more general mem_fn and bind. + // + /////////////////////////////////////////////////////////////////////// + + /// mem_fun_t + /// + /// Member function with no arguments. + /// + template + class EASTL_REMOVE_AT_2024_APRIL mem_fun_t + : public unary_function + { + public: + typedef Result (T::*MemberFunction)(); + + inline explicit mem_fun_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T* pT) const + { + return (pT->*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun1_t + /// + /// Member function with one argument. + /// + template + class EASTL_REMOVE_AT_2024_APRIL mem_fun1_t + : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument); + + inline explicit mem_fun1_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T* pT, Argument arg) const + { + return (pT->*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun_t + /// + /// Const member function with no arguments. + /// Note that we inherit from unary_function + /// instead of what the C++ standard specifies: unary_function. + /// The C++ standard is in error and this has been recognized by the defect group. + /// + template + class EASTL_REMOVE_AT_2024_APRIL const_mem_fun_t + : public unary_function + { + public: + typedef Result (T::*MemberFunction)() const; + + inline explicit const_mem_fun_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T* pT) const + { + return (pT->*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun1_t + /// + /// Const member function with one argument. + /// Note that we inherit from unary_function + /// instead of what the C++ standard specifies: unary_function. + /// The C++ standard is in error and this has been recognized by the defect group. + /// + template + class EASTL_REMOVE_AT_2024_APRIL const_mem_fun1_t + : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument) const; + + inline explicit const_mem_fun1_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T* pT, Argument arg) const + { + return (pT->*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun + /// + /// This is the high level interface to the mem_fun_t family. 
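+    // A worked sketch of this adaptor family ('Widget' is a placeholder type);
+    // mem_fun applies a member function through an object pointer, mem_fun_ref
+    // through a reference:
+    //
+    //     struct Widget { void print() const { puts("widget"); } };
+    //     Widget w, *pw = &w;
+    //     eastl::mem_fun(&Widget::print)(pw);    // calls pw->print()
+    //     eastl::mem_fun_ref(&Widget::print)(w); // calls w.print()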
+ /// + /// Example usage: + /// struct TestClass { void print() { puts("hello"); } } + /// TestClass* pTestClassArray[3] = { ... }; + /// for_each(pTestClassArray, pTestClassArray + 3, &TestClass::print); + /// + /// Note: using conventional inlining here to avoid issues on GCC/Linux + /// + template + EASTL_REMOVE_AT_2024_APRIL inline mem_fun_t + mem_fun(Result (T::*MemberFunction)()) + { + return eastl::mem_fun_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline mem_fun1_t + mem_fun(Result (T::*MemberFunction)(Argument)) + { + return eastl::mem_fun1_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline const_mem_fun_t + mem_fun(Result (T::*MemberFunction)() const) + { + return eastl::const_mem_fun_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline const_mem_fun1_t + mem_fun(Result (T::*MemberFunction)(Argument) const) + { + return eastl::const_mem_fun1_t(MemberFunction); + } + + + + + + /////////////////////////////////////////////////////////////////////// + // mem_fun_ref + // mem_fun1_ref + // + /////////////////////////////////////////////////////////////////////// + + /// mem_fun_ref_t + /// + template + class EASTL_REMOVE_AT_2024_APRIL mem_fun_ref_t + : public unary_function + { + public: + typedef Result (T::*MemberFunction)(); + + inline explicit mem_fun_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T& t) const + { + return (t.*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun1_ref_t + /// + template + class EASTL_REMOVE_AT_2024_APRIL mem_fun1_ref_t + : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument); + + inline explicit mem_fun1_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T& t, Argument arg) const + { + return (t.*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun_ref_t + /// + template + class EASTL_REMOVE_AT_2024_APRIL const_mem_fun_ref_t + : public unary_function + { + public: + typedef Result (T::*MemberFunction)() const; + + inline explicit const_mem_fun_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T& t) const + { + return (t.*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun1_ref_t + /// + template + class EASTL_REMOVE_AT_2024_APRIL const_mem_fun1_ref_t + : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument) const; + + inline explicit const_mem_fun1_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T& t, Argument arg) const + { + return (t.*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun_ref + /// Example usage: + /// struct TestClass { void print() { puts("hello"); } } + /// TestClass testClassArray[3]; + /// for_each(testClassArray, testClassArray + 3, &TestClass::print); + /// + /// Note: using conventional inlining here to avoid issues on GCC/Linux + /// + template + EASTL_REMOVE_AT_2024_APRIL inline mem_fun_ref_t + mem_fun_ref(Result (T::*MemberFunction)()) + { + return eastl::mem_fun_ref_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline mem_fun1_ref_t + mem_fun_ref(Result 
(T::*MemberFunction)(Argument)) + { + return eastl::mem_fun1_ref_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline const_mem_fun_ref_t + mem_fun_ref(Result (T::*MemberFunction)() const) + { + return eastl::const_mem_fun_ref_t(MemberFunction); + } + + template + EASTL_REMOVE_AT_2024_APRIL inline const_mem_fun1_ref_t + mem_fun_ref(Result (T::*MemberFunction)(Argument) const) + { + return eastl::const_mem_fun1_ref_t(MemberFunction); + } + + EASTL_INTERNAL_RESTORE_DEPRECATED() + + // not_fn_ret + // not_fn_ret is a implementation specified return type of eastl::not_fn. + // The type name is not specified but it does have mandated functions that conforming implementations must support. + // + // http://en.cppreference.com/w/cpp/utility/functional/not_fn + // + template + struct not_fn_ret + { + explicit not_fn_ret(F&& f) : mDecayF(eastl::forward(f)) {} + not_fn_ret(not_fn_ret&& f) = default; + not_fn_ret(const not_fn_ret& f) = default; + + // overloads for lvalues + template + auto operator()(Args&&... args) & + -> decltype(!eastl::declval&, Args...>>()) + { return !eastl::invoke(mDecayF, eastl::forward(args)...); } + + template + auto operator()(Args&&... args) const & + -> decltype(!eastl::declval const&, Args...>>()) + { return !eastl::invoke(mDecayF, eastl::forward(args)...); } + + // overloads for rvalues + template + auto operator()(Args&&... args) && + -> decltype(!eastl::declval, Args...>>()) + { return !eastl::invoke(eastl::move(mDecayF), eastl::forward(args)...); } + + template + auto operator()(Args&&... args) const && + -> decltype(!eastl::declval const, Args...>>()) + { return !eastl::invoke(eastl::move(mDecayF), eastl::forward(args)...); } + + eastl::decay_t mDecayF; + }; + + /// not_fn + /// + /// Creates an implementation specified functor that returns the complement of the callable object it was passed. + /// not_fn is intended to replace the C++03-era negators eastl::not1 and eastl::not2. + /// + /// http://en.cppreference.com/w/cpp/utility/functional/not_fn + /// + /// Example usage: + /// + /// auto nf = eastl::not_fn([]{ return false; }); + /// assert(nf()); // return true + /// + template + inline not_fn_ret not_fn(F&& f) + { + return not_fn_ret(eastl::forward(f)); + } + + + /////////////////////////////////////////////////////////////////////// + // hash + /////////////////////////////////////////////////////////////////////// + namespace Internal + { + // utility to disable the generic template specialization that is + // used for enum types only. + template + struct EnableHashIf {}; + + template + struct EnableHashIf + { + size_t operator()(T p) const { return size_t(p); } + }; + } // namespace Internal + + + template struct hash; + + template + struct hash : Internal::EnableHashIf> {}; + + template struct hash // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution. 
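+    // The EnableHashIf machinery above makes eastl::hash work for any enum type
+    // out of the box. Sketch ('Color' is a placeholder):
+    //
+    //     enum class Color : unsigned { Red, Green, Blue };
+    //     size_t h = eastl::hash<Color>()(Color::Green); // size_t(Color::Green)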
+ { size_t operator()(T* p) const { return size_t(uintptr_t(p)); } }; + + template <> struct hash + { size_t operator()(bool val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(char val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed char val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned char val) const { return static_cast(val); } }; + + #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE + template <> struct hash + { size_t operator()(char8_t val) const { return static_cast(val); } }; + #endif + + #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE + template <> struct hash + { size_t operator()(char16_t val) const { return static_cast(val); } }; + #endif + + #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE + template <> struct hash + { size_t operator()(char32_t val) const { return static_cast(val); } }; + #endif + + // If wchar_t is a native type instead of simply a define to an existing type... + #if !defined(EA_WCHAR_T_NON_NATIVE) + template <> struct hash + { size_t operator()(wchar_t val) const { return static_cast(val); } }; + #endif + + template <> struct hash + { size_t operator()(signed short val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned short val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed int val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned int val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed long long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned long long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(float val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(double val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(long double val) const { return static_cast(val); } }; + + #if defined(EA_HAVE_INT128) && EA_HAVE_INT128 + template <> struct hash + { size_t operator()(uint128_t val) const { return static_cast(val); } }; + #endif + + + /////////////////////////////////////////////////////////////////////////// + // string hashes + // + // Note that our string hashes here intentionally are slow for long strings. + // The reasoning for this is so: + // - The large majority of hashed strings are only a few bytes long. + // - The hash function is significantly more efficient if it can make this assumption. + // - The user is welcome to make a custom hash for those uncommon cases where + // long strings need to be hashed. Indeed, the user can probably make a + // special hash customized for such strings that's better than what we provide. + /////////////////////////////////////////////////////////////////////////// + + template <> struct hash + { + size_t operator()(const char* p) const + { + uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings. 
+ result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + +#if EA_CHAR8_UNIQUE + template <> struct hash + { + size_t operator()(const char8_t* p) const + { + uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char8_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; +#endif + + + template <> struct hash + { + size_t operator()(const char16_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char16_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char32_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char32_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + +#if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE + template<> struct hash + { + size_t operator()(const wchar_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template<> struct hash + { + size_t operator()(const wchar_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; +#endif + + /// string_hash + /// + /// Defines a generic string hash for an arbitrary EASTL basic_string container. 
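+    // All of the string hashes above are the same 32-bit FNV-1 scheme: seed with
+    // 2166136261, multiply by the prime 16777619, then XOR in the next code unit.
+    // Stand-alone sketch of the char variant for reference:
+    //
+    //     inline size_t fnv1_32(const char* p)
+    //     {
+    //         uint32_t c, result = 2166136261U;
+    //         while ((c = (uint8_t)*p++) != 0)
+    //             result = (result * 16777619) ^ c;
+    //         return (size_t)result;
+    //     }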
+ /// + /// Example usage: + /// eastl::hash_set > hashSet; + /// + template + struct string_hash + { + typedef String string_type; + typedef typename String::value_type value_type; + typedef typename eastl::make_unsigned::type unsigned_value_type; + + size_t operator()(const string_type& s) const + { + const unsigned_value_type* p = (const unsigned_value_type*)s.c_str(); + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = *p++) != 0) + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + +} // namespace eastl + +#include + +EA_RESTORE_VC_WARNING(); + +#endif // Header include guard + + + + + + + diff --git a/external/EASTL/include/EASTL/hash_map.h b/external/EASTL/include/EASTL/hash_map.h new file mode 100644 index 00000000..a959e270 --- /dev/null +++ b/external/EASTL/include/EASTL/hash_map.h @@ -0,0 +1,688 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file is based on the TR1 (technical report 1) reference implementation +// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely +// many or all C++ library vendors' implementations of this classes will be +// based off of the reference version and so will look pretty similar to this +// file as well as other vendors' versions. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_HASH_MAP_H +#define EASTL_HASH_MAP_H + + +#include +#include +#include +#include +#if EASTL_EXCEPTIONS_ENABLED +#include +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// EASTL_HASH_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASH_MAP_DEFAULT_NAME + #define EASTL_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_map" // Unless the user overrides something, this is "EASTL hash_map". + #endif + + + /// EASTL_HASH_MULTIMAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASH_MULTIMAP_DEFAULT_NAME + #define EASTL_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multimap" // Unless the user overrides something, this is "EASTL hash_multimap". + #endif + + + /// EASTL_HASH_MAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_MAP_DEFAULT_ALLOCATOR + #define EASTL_HASH_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MAP_DEFAULT_NAME) + #endif + + /// EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// hash_map + /// + /// Implements a hash_map, which is a hashed associative container. + /// Lookups are O(1) (that is, they are fast) but the container is + /// not sorted. Note that lookups are only O(1) if the hash table + /// is well-distributed (non-colliding). The lookup approaches + /// O(n) behavior as the table becomes increasingly poorly distributed. 
+ /// + /// set_max_load_factor + /// If you want to make a hashtable never increase its bucket usage, + /// call set_max_load_factor with a very high value such as 100000.f. + /// + /// bCacheHashCode + /// We provide the boolean bCacheHashCode template parameter in order + /// to allow the storing of the hash code of the key within the map. + /// When this option is disabled, the rehashing of the table will + /// call the hash function on the key. Setting bCacheHashCode to true + /// is useful for cases whereby the calculation of the hash value for + /// a contained object is very expensive. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. + /// + /// Example find_as usage: + /// hash_map hashMap; + /// i = hashMap.find_as("hello"); // Use default hash and compare. + /// + /// Example find_as usage (namespaces omitted for brevity): + /// hash_map hashMap; + /// i = hashMap.find_as("hello", hash(), equal_to<>()); + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_map + : public hashtable, Allocator, eastl::use_first >, Predicate, + Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, true> + { + public: + typedef hashtable, Allocator, + eastl::use_first >, + Predicate, Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, true, true> base_type; + typedef hash_map this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; // NOTE: 'value_type = pair'. + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + + using base_type::insert; + + public: + /// hash_map + /// + /// Default constructor. + /// + hash_map() + : this_type(EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + { + // Empty + } + + + /// hash_map + /// + /// Constructor which creates an empty container with allocator. + /// + explicit hash_map(const allocator_type& allocator) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), + Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_map + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + /// note: difference in explicit keyword from the standard. 
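+        // Sketch of the find_as extension documented above: look up string keys
+        // with a raw char pointer, avoiding a temporary key object (the character
+        // hashes here use the same FNV1 scheme as hash<string>, so they agree):
+        //
+        //     eastl::hash_map<eastl::string, int> m;
+        //     m["hello"] = 1;
+        //     const char* key = "hello";
+        //     auto it = m.find_as(key);     // no eastl::string temporary
+        //     bool found = (it != m.end()); // true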
+ explicit hash_map(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + // hash_map(size_type nBucketCount, const allocator_type& allocator) + // hash_map(size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + + hash_map(const this_type& x) + : base_type(x) + { + } + + // hash_map(const this_type& x, const allocator_type& allocator) + + + hash_map(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_map(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_map + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. hash_map hm = { {3,"c"}, {4,"d"}, {5,"e"} }; ) + /// + hash_map(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + hash_map(std::initializer_list ilist, const allocator_type& allocator) + : base_type(ilist.begin(), ilist.end(), 0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + // hash_map(std::initializer_list ilist, size_type nBucketCount, const allocator_type& allocator) + + // hash_map(std::initializer_list ilist, size_type nBucketCount, const Hash& hashFunction, + // const allocator_type& allocator) + + /// hash_map + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. + /// + template + hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + // template + // hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const allocator_type& allocator) + + // template + // hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + + /// insert + /// + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. 
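+        // Sketch contrasting the insertion paths provided below:
+        //
+        //     eastl::hash_map<int, eastl::string> m;
+        //     m.insert(42);             // extension: default-constructed value
+        //     m[7] = "seven";           // default-construct, then assign
+        //     m.try_emplace(9, "nine"); // constructs the string in place,
+        //                               // and only if key 9 is absent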
+ insert_return_type insert(const key_type& key) + { + return base_type::DoInsertKey(true_type(), key); + } + + T& at(const key_type& k) + { + iterator it = base_type::find(k); + + if (it == base_type::end()) + { + #if EASTL_EXCEPTIONS_ENABLED + // throw exeption if exceptions enabled + throw std::out_of_range("invalid hash_map key"); + #else + // assert false if asserts enabled + EASTL_ASSERT_MSG(false, "invalid hash_map key"); + #endif + } + // undefined behaviour if exceptions and asserts are disabled and it == end() + return it->second; + } + + + const T& at(const key_type& k) const + { + const_iterator it = base_type::find(k); + + if (it == base_type::end()) + { + #if EASTL_EXCEPTIONS_ENABLED + // throw exeption if exceptions enabled + throw std::out_of_range("invalid hash_map key"); + #else + // assert false if asserts enabled + EASTL_ASSERT_MSG(false, "invalid hash_map key"); + #endif + } + // undefined behaviour if exceptions and asserts are disabled and it == end() + return it->second; + } + + + insert_return_type insert(key_type&& key) + { + return base_type::DoInsertKey(true_type(), eastl::move(key)); + } + + + mapped_type& operator[](const key_type& key) + { + return (*base_type::DoInsertKey(true_type(), key).first).second; + + // Slower reference version: + //const typename base_type::iterator it = base_type::find(key); + //if(it != base_type::end()) + // return (*it).second; + //return (*base_type::insert(value_type(key, mapped_type())).first).second; + } + + mapped_type& operator[](key_type&& key) + { + // The Standard states that this function "inserts the value value_type(std::move(key), mapped_type())" + return (*base_type::DoInsertKey(true_type(), eastl::move(key)).first).second; + } + + // try_emplace API added in C++17 + template + inline insert_return_type try_emplace(const key_type& k, Args&&... args) + { + return try_emplace_forwarding(k, eastl::forward(args)...); + } + + template + inline insert_return_type try_emplace(key_type&& k, Args&&... args) { + return try_emplace_forwarding(eastl::move(k), eastl::forward(args)...); + } + + template + inline iterator try_emplace(const_iterator, const key_type& k, Args&&... args) { + // Currently, the first parameter is ignored. + insert_return_type result = try_emplace(k, eastl::forward(args)...); + return base_type::DoGetResultIterator(true_type(), result); + } + + template + inline iterator try_emplace(const_iterator, key_type&& k, Args&&... args) { + // Currently, the first parameter is ignored. + insert_return_type result = try_emplace(eastl::move(k), eastl::forward(args)...); + return base_type::DoGetResultIterator(true_type(), result); + } + + private: + template + insert_return_type try_emplace_forwarding(K&& k, Args&&... args) + { + const auto key_data = base_type::DoFindKeyData(k); + if (key_data.node) + { // Node exists, no insertion needed. + return eastl::pair( + iterator(key_data.node, base_type::mpBucketArray + key_data.bucket_index), false); + } + else + { + node_type* const pNodeNew = + base_type::DoAllocateNode(piecewise_construct, eastl::forward_as_tuple(eastl::forward(k)), + forward_as_tuple(eastl::forward(args)...)); + // the key might have been moved from above, so we can't use `k` anymore. 
+ const auto& key = base_type::mExtractKey(pNodeNew->mValue); + return base_type::template DoInsertUniqueNode(key, key_data.code, key_data.bucket_index, pNodeNew); + } + } + }; // hash_map + + /// hash_map erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_map/erase_if + template + typename eastl::hash_map::size_type erase_if(eastl::hash_map& c, UserPredicate predicate) + { + auto oldSize = c.size(); + // Erases all elements that satisfy the predicate from the container. + for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + return oldSize - c.size(); + } + + + /// hash_multimap + /// + /// Implements a hash_multimap, which is the same thing as a hash_map + /// except that contained elements need not be unique. See the + /// documentation for hash_set for details. + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_multimap + : public hashtable, Allocator, eastl::use_first >, Predicate, + Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, false> + { + public: + typedef hashtable, Allocator, + eastl::use_first >, + Predicate, Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, true, false> base_type; + typedef hash_multimap this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; // Note that this is pair. + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::iterator iterator; + + using base_type::insert; + + private: + using base_type::insert_or_assign; + + public: + /// hash_multimap + /// + /// Default constructor. + /// + explicit hash_multimap(const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), + Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_multimap + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + /// note: difference in explicit keyword from the standard. + explicit hash_multimap(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + // hash_multimap(size_type nBucketCount, const allocator_type& allocator) + // hash_multimap(size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + + hash_multimap(const this_type& x) + : base_type(x) + { + } + + // hash_multimap(const this_type& x, const allocator_type& allocator) + + + hash_multimap(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_multimap(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_multimap + /// + /// initializer_list-based constructor. 
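+        // Non-unique key behavior, sketched:
+        //
+        //     eastl::hash_multimap<int, const char*> mm;
+        //     mm.insert(eastl::make_pair(3, "c"));
+        //     mm.insert(eastl::make_pair(3, "C")); // kept: duplicate keys allowed
+        //     size_t n = mm.count(3);              // 2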
+ /// Allows for initializing with brace values (e.g. hash_multimap hm = { {3,"c"}, {3,"C"}, {4,"d"} }; ) + /// + hash_multimap(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + hash_multimap(std::initializer_list ilist, const allocator_type& allocator) + : base_type(ilist.begin(), ilist.end(), 0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + // hash_multimap(std::initializer_list ilist, size_type nBucketCount, const allocator_type& allocator) + + // hash_multimap(std::initializer_list ilist, size_type nBucketCount, const Hash& hashFunction, + // const allocator_type& allocator) + + + /// hash_multimap + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. + /// + template + hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + // template + // hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const allocator_type& allocator) + + // template + // hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + + /// insert + /// + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. + insert_return_type insert(const key_type& key) + { + return base_type::DoInsertKey(false_type(), key); + } + + + insert_return_type insert(key_type&& key) + { + return base_type::DoInsertKey(false_type(), eastl::move(key)); + } + + }; // hash_multimap + + /// hash_multimap erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_multimap/erase_if + template + typename eastl::hash_multimap::size_type erase_if(eastl::hash_multimap& c, UserPredicate predicate) + { + auto oldSize = c.size(); + // Erases all elements that satisfy the predicate from the container. 
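+		// (Editorial note, an assumption worth stating: hashtable::erase(iterator) returns an
+		// iterator to the element that followed the erased one, and erasing does not rehash,
+		// so 'last' stays valid and the loop below advances safely.)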
+		for (auto i = c.begin(), last = c.end(); i != last;)
+		{
+			if (predicate(*i))
+			{
+				i = c.erase(i);
+			}
+			else
+			{
+				++i;
+			}
+		}
+		return oldSize - c.size();
+	}
+
+
+
+	///////////////////////////////////////////////////////////////////////
+	// global operators
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator==(const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		typedef typename hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+
+		// We implement branching with the assumption that the return value is usually false.
+		if(a.size() != b.size())
+			return false;
+
+		// For map (with its unique keys), we need only test that each element in a can be found in b,
+		// as there can be only one such pairing per element. multimap needs to do something more elaborate.
+		for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai)
+		{
+			const_iterator bi = b.find(ai->first);
+
+			if((bi == biEnd) || !(*ai == *bi)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair.
+				return false; // It's possible that two elements in the two containers have identical keys but different values.
+		}
+
+		return true;
+	}
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+	template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator!=(const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		return !(a == b);
+	}
+#endif
+
+	template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator==(const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		typedef typename hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+		typedef typename eastl::iterator_traits<const_iterator>::difference_type difference_type;
+
+		// We implement branching with the assumption that the return value is usually false.
+		if(a.size() != b.size())
+			return false;
+
+		// We can't simply search for each element of a in b, as it may be that the bucket for
+		// two elements in a has those same two elements in b but in different order (which should
+		// still result in equality). Also it's possible that one bucket in a has two elements which
+		// both match a solitary element in the equivalent bucket in b (which shouldn't result in equality).
+		eastl::pair<const_iterator, const_iterator> aRange;
+		eastl::pair<const_iterator, const_iterator> bRange;
+
+		for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a...
+		{
+			aRange = a.equal_range(ai->first); // Get the range of elements in a that are equal to ai.
+			bRange = b.equal_range(ai->first); // Get the range of elements in b that are equal to ai.
+
+			// We need to verify that aRange == bRange. First make sure the range sizes are equivalent...
+			const difference_type aDistance = eastl::distance(aRange.first, aRange.second);
+			const difference_type bDistance = eastl::distance(bRange.first, bRange.second);
+
+			if(aDistance != bDistance)
+				return false;
+
+			// At this point, aDistance > 0 and aDistance == bDistance.
+			// Implement a fast pathway for the case that there's just a single element.
+			if(aDistance == 1)
+			{
+				if(!(*aRange.first == *bRange.first)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair.
+					return false; // It's possible that two elements in the two containers have identical keys but different values. Ditto for the permutation case below.
+			}
+			else
+			{
+				// Check to see if these aRange and bRange are any permutation of each other.
+				// This check gets slower as there are more elements in the range.
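+				// (Editorial note: for the forward iterators used here, is_permutation makes
+				// O(d^2) comparisons in the worst case, where d is the length of this key run.)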
+				if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first))
+					return false;
+			}
+		}
+
+		return true;
+	}
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+	template <typename Key, typename T, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator!=(const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		return !(a == b);
+	}
+#endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/hash_set.h b/external/EASTL/include/EASTL/hash_set.h
new file mode 100644
index 00000000..9b404306
--- /dev/null
+++ b/external/EASTL/include/EASTL/hash_set.h
@@ -0,0 +1,532 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is based on the TR1 (technical report 1) reference implementation
+// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely
+// many or all C++ library vendors' implementations of these classes will be
+// based off of the reference version and so will look pretty similar to this
+// file as well as other vendors' versions.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_HASH_SET_H
+#define EASTL_HASH_SET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+	/// EASTL_HASH_SET_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	///
+	#ifndef EASTL_HASH_SET_DEFAULT_NAME
+		#define EASTL_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_set" // Unless the user overrides something, this is "EASTL hash_set".
+	#endif
+
+
+	/// EASTL_HASH_MULTISET_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	///
+	#ifndef EASTL_HASH_MULTISET_DEFAULT_NAME
+		#define EASTL_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multiset" // Unless the user overrides something, this is "EASTL hash_multiset".
+	#endif
+
+
+	/// EASTL_HASH_SET_DEFAULT_ALLOCATOR
+	///
+	#ifndef EASTL_HASH_SET_DEFAULT_ALLOCATOR
+		#define EASTL_HASH_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_SET_DEFAULT_NAME)
+	#endif
+
+	/// EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+	///
+	#ifndef EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+		#define EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTISET_DEFAULT_NAME)
+	#endif
+
+
+
+	/// hash_set
+	///
+	/// Implements a hash_set, which is a hashed unique-item container.
+	/// Lookups are O(1) (that is, they are fast) but the container is
+	/// not sorted. Note that lookups are only O(1) if the hash table
+	/// is well-distributed (non-colliding). The lookup approaches
+	/// O(n) behavior as the table becomes increasingly poorly distributed.
+	///
+	/// set_max_load_factor
+	///    If you want to make a hashtable never increase its bucket usage,
+	///    call set_max_load_factor with a very high value such as 100000.f.
+	///
+	/// bCacheHashCode
+	///    We provide the boolean bCacheHashCode template parameter in order
+	///    to allow the storing of the hash code of the key within the map.
+	///    When this option is disabled, the rehashing of the table will
+	///    call the hash function on the key.
Setting bCacheHashCode to true + /// is useful for cases whereby the calculation of the hash value for + /// a contained object is very expensive. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. + /// + /// Example find_as usage: + /// hash_set hashSet; + /// i = hashSet.find_as("hello"); // Use default hash and compare. + /// + /// Example find_as usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// i = hashSet.find_as("hello", hash(), equal_to<>()); + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_set + : public hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, true> + { + public: + typedef hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, true> base_type; + typedef hash_set this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + + public: + /// hash_set + /// + /// Default constructor. + /// + hash_set() + : this_type(EASTL_HASH_SET_DEFAULT_ALLOCATOR) + { + // Empty + } + + + /// hash_set + /// + /// Constructor which creates an empty container with allocator. + /// + explicit hash_set(const allocator_type& allocator) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_set + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + /// note: difference in explicit keyword from the standard. + explicit hash_set(size_type nBucketCount, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(), + const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + // hash_set(size_type nBucketCount, const allocator_type& allocator) + // hash_set(size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + + hash_set(const this_type& x) + : base_type(x) + { + } + + // hash_set(const this_type& x, const allocator_type& allocator) + + + hash_set(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_set(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_set + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. 
hash_set<int> hs = { 3, 4, 5, }; )
+		///
+		hash_set(std::initializer_list<value_type> ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+				   const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+			: base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+		{
+			// Empty
+		}
+
+		hash_set(std::initializer_list<value_type> ilist, const allocator_type& allocator)
+			: base_type(ilist.begin(), ilist.end(), 0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self<Value>(), allocator)
+		{
+			// Empty
+		}
+
+		// hash_set(std::initializer_list<value_type> ilist, size_type nBucketCount, const allocator_type& allocator)
+
+		// hash_set(std::initializer_list<value_type> ilist, size_type nBucketCount, const Hash& hashFunction,
+		//          const allocator_type& allocator)
+
+
+		/// hash_set
+		///
+		/// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+		/// elements in the input range.
+		///
+		template <typename ForwardIterator>
+		hash_set(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+				   const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+			: base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+		{
+			// Empty
+		}
+
+		// template <typename ForwardIterator>
+		// hash_set(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const allocator_type& allocator)
+
+		// template <typename ForwardIterator>
+		// hash_set(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator)
+
+
+		this_type& operator=(const this_type& x)
+		{
+			return static_cast<this_type&>(base_type::operator=(x));
+		}
+
+
+		this_type& operator=(std::initializer_list<value_type> ilist)
+		{
+			return static_cast<this_type&>(base_type::operator=(ilist));
+		}
+
+
+		this_type& operator=(this_type&& x)
+		{
+			return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+		}
+
+	}; // hash_set
+
+	/// hash_set erase_if
+	///
+	/// https://en.cppreference.com/w/cpp/container/unordered_set/erase_if
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+	typename eastl::hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+	{
+		auto oldSize = c.size();
+		// Erases all elements that satisfy the predicate pred from the container.
+		for (auto i = c.begin(), last = c.end(); i != last;)
+		{
+			if (predicate(*i))
+			{
+				i = c.erase(i);
+			}
+			else
+			{
+				++i;
+			}
+		}
+		return oldSize - c.size();
+	}
+
+
+	/// hash_multiset
+	///
+	/// Implements a hash_multiset, which is the same thing as a hash_set
+	/// except that contained elements need not be unique. See the documentation
+	/// for hash_set for details.
+	///
+	template <typename Value, typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>,
+			  typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+	class hash_multiset
+		: public hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+						   Hash, mod_range_hashing, default_ranged_hash,
+						   prime_rehash_policy, bCacheHashCode, false, false>
+	{
+	public:
+		typedef hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+						  Hash, mod_range_hashing, default_ranged_hash,
+						  prime_rehash_policy, bCacheHashCode, false, false> base_type;
+		typedef hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+		typedef typename base_type::size_type size_type;
+		typedef typename base_type::value_type value_type;
+		typedef typename base_type::allocator_type allocator_type;
+		typedef typename base_type::node_type node_type;
+
+	public:
+		/// hash_multiset
+		///
+		/// Default constructor.
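+		///
+		/// Example usage (an illustrative sketch, not part of the upstream header):
+		///     hash_multiset<int> hms;
+		///     hms.insert(3);
+		///     hms.insert(3); // Unlike hash_set, duplicates are retained; size() == 2.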
+ /// + explicit hash_multiset(const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_multiset + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + /// note: difference in explicit keyword from the standard. + explicit hash_multiset(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + // hash_multiset(size_type nBucketCount, const allocator_type& allocator) + // hash_multiset(size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator) + + + hash_multiset(const this_type& x) + : base_type(x) + { + } + + // hash_multiset(const this_type& x, const allocator_type& allocator) + + + hash_multiset(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_multiset(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_multiset + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. hash_set hs = { 3, 3, 4, }; ) + /// + hash_multiset(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + hash_multiset(std::initializer_list ilist, const allocator_type& allocator) + : base_type(ilist.begin(), ilist.end(), 0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self(), allocator) + { + // Empty + } + + // hash_multiset(std::initializer_list ilist, size_type nBucketCount, const allocator_type& allocator) + + // hash_multiset(std::initializer_list ilist, size_type nBucketCount, const Hash& hashFunction, + // const allocator_type& allocator) + + + /// hash_multiset + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. 
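+		///
+		/// Example usage (an illustrative sketch; 'v' is an assumed eastl::vector<int>):
+		///     eastl::vector<int> v = { 3, 3, 4 };
+		///     hash_multiset<int> hms(v.begin(), v.end(), 16); // Start with 16 buckets.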
+		///
+		template <typename ForwardIterator>
+		hash_multiset(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+					  const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+			: base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+		{
+			// Empty
+		}
+
+		// template <typename ForwardIterator>
+		// hash_multiset(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const allocator_type& allocator)
+
+		// template <typename ForwardIterator>
+		// hash_multiset(ForwardIterator first, ForwardIterator last, size_type nBucketCount, const Hash& hashFunction, const allocator_type& allocator)
+
+
+		this_type& operator=(const this_type& x)
+		{
+			return static_cast<this_type&>(base_type::operator=(x));
+		}
+
+
+		this_type& operator=(std::initializer_list<value_type> ilist)
+		{
+			return static_cast<this_type&>(base_type::operator=(ilist));
+		}
+
+
+		this_type& operator=(this_type&& x)
+		{
+			return static_cast<this_type&>(base_type::operator=(eastl::move(x)));
+		}
+
+	}; // hash_multiset
+
+	/// hash_multiset erase_if
+	///
+	/// https://en.cppreference.com/w/cpp/container/unordered_multiset/erase_if
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode, typename UserPredicate>
+	typename eastl::hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::size_type erase_if(eastl::hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& c, UserPredicate predicate)
+	{
+		auto oldSize = c.size();
+		// Erases all elements that satisfy the predicate pred from the container.
+		for (auto i = c.begin(), last = c.end(); i != last;)
+		{
+			if (predicate(*i))
+			{
+				i = c.erase(i);
+			}
+			else
+			{
+				++i;
+			}
+		}
+		return oldSize - c.size();
+	}
+
+
+
+	///////////////////////////////////////////////////////////////////////
+	// global operators
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator==(const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		typedef typename hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+
+		// We implement branching with the assumption that the return value is usually false.
+		if(a.size() != b.size())
+			return false;
+
+		// For set (with its unique keys), we need only test that each element in a can be found in b,
+		// as there can be only one such pairing per element. multiset needs to do something more elaborate.
+		for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai)
+		{
+			const_iterator bi = b.find(*ai);
+
+			if((bi == biEnd) || !(*ai == *bi)) // We have to compare values in addition to making sure the lookups succeeded. This is because the lookup is done via the user-supplied Predicate
+				return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so.
+		}
+
+		return true;
+	}
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator!=(const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		return !(a == b);
+	}
+#endif
+
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator==(const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		typedef typename hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::const_iterator const_iterator;
+		typedef typename eastl::iterator_traits<const_iterator>::difference_type difference_type;
+
+		// We implement branching with the assumption that the return value is usually false.
+		if(a.size() != b.size())
+			return false;
+
+		// We can't simply search for each element of a in b, as it may be that the bucket for
+		// two elements in a has those same two elements in b but in different order (which should
+		// still result in equality).
Also it's possible that one bucket in a has two elements which
+		// both match a solitary element in the equivalent bucket in b (which shouldn't result in equality).
+		eastl::pair<const_iterator, const_iterator> aRange;
+		eastl::pair<const_iterator, const_iterator> bRange;
+
+		for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a...
+		{
+			aRange = a.equal_range(*ai); // Get the range of elements in a that are equal to ai.
+			bRange = b.equal_range(*ai); // Get the range of elements in b that are equal to ai.
+
+			// We need to verify that aRange == bRange. First make sure the range sizes are equivalent...
+			const difference_type aDistance = eastl::distance(aRange.first, aRange.second);
+			const difference_type bDistance = eastl::distance(bRange.first, bRange.second);
+
+			if(aDistance != bDistance)
+				return false;
+
+			// At this point, aDistance > 0 and aDistance == bDistance.
+			// Implement a fast pathway for the case that there's just a single element.
+			if(aDistance == 1)
+			{
+				if(!(*aRange.first == *bRange.first)) // We have to compare values in addition to making sure the distance (element count) was equal. This is because the lookup is done via the user-supplied Predicate
+					return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so. Ditto for the is_permutation usage below.
+			}
+			else
+			{
+				// Check to see if these aRange and bRange are any permutation of each other.
+				// This check gets slower as there are more elements in the range.
+				if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first))
+					return false;
+			}
+		}
+
+		return true;
+	}
+
+#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON)
+	template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode>
+	inline bool operator!=(const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& a,
+						   const hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>& b)
+	{
+		return !(a == b);
+	}
+#endif
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/heap.h b/external/EASTL/include/EASTL/heap.h
new file mode 100644
index 00000000..f0e770b9
--- /dev/null
+++ b/external/EASTL/include/EASTL/heap.h
@@ -0,0 +1,685 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements heap functionality much like the std C++ heap algorithms.
+// Such heaps are not the same thing as memory heaps or pools, but rather are
+// semi-sorted random access containers which have the primary purpose of
+// supporting the implementation of priority_queue and similar data structures.
+//
+// The primary distinctions between this heap functionality and std::heap are:
+//    - This heap exposes some extra functionality such as is_heap and change_heap.
+//    - This heap is more efficient than versions found in typical STL
+//      implementations such as STLPort, Microsoft, and Metrowerks. This comes
+//      about due to better use of array dereferencing and branch prediction.
+//      You should expect gains of 5-30%, depending on the usage and platform.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The publicly usable functions we define are:
+//    push_heap     -- Adds an entry to a heap.                             Same as C++ std::push_heap.
+//    pop_heap      -- Removes the top entry from a heap.                   Same as C++ std::pop_heap.
+//    make_heap     -- Converts an array to a heap.                        Same as C++ std::make_heap.
+//    sort_heap     -- Sorts a heap in place.                              Same as C++ std::sort_heap.
+//    remove_heap   -- Removes an arbitrary entry from a heap.
+//    change_heap   -- Changes the priority of an entry in the heap.
+//    is_heap       -- Returns true if an array appears to be in heap format.  Same as C++11 std::is_heap.
+//    is_heap_until -- Returns largest part of the range which is a heap.  Same as C++11 std::is_heap_until.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_HEAP_H
+#define EASTL_HEAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+
+namespace eastl
+{
+
+	///////////////////////////////////////////////////////////////////////
+	// promote_heap (internal function)
+	///////////////////////////////////////////////////////////////////////
+
+	template <typename RandomAccessIterator, typename Distance, typename T, typename ValueType>
+	inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value)
+	{
+		for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+			(position > topPosition) && (*(first + parentPosition) < value);
+			parentPosition = (position - 1) >> 1)
+		{
+			*(first + position) = eastl::forward<ValueType>(*(first + parentPosition)); // Swap the node with its parent.
+			position = parentPosition;
+		}
+
+		*(first + position) = eastl::forward<T>(value);
+	}
+
+	/// promote_heap
+	///
+	/// Moves a value in the heap from a given position upward until
+	/// it is sorted correctly. It's kind of like bubble-sort, except that
+	/// instead of moving linearly from the back of a list to the front,
+	/// it moves from the bottom of the tree up the branches towards the
+	/// top. But otherwise is just like bubble-sort.
+	///
+	/// This function requires that the value argument refer to a value
+	/// that is currently not within the heap.
+	///
+	template <typename RandomAccessIterator, typename Distance, typename T>
+	inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value)
+	{
+		typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+		promote_heap_impl<RandomAccessIterator, Distance, const T&, const value_type>(first, topPosition, position, value);
+	}
+
+
+	/// promote_heap
+	///
+	/// Moves a value in the heap from a given position upward until
+	/// it is sorted correctly. It's kind of like bubble-sort, except that
+	/// instead of moving linearly from the back of a list to the front,
+	/// it moves from the bottom of the tree up the branches towards the
+	/// top. But otherwise is just like bubble-sort.
+	///
+	/// This function requires that the value argument refer to a value
+	/// that is currently not within the heap.
+	///
+	template <typename RandomAccessIterator, typename Distance, typename T>
+	inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value)
+	{
+		typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+		promote_heap_impl<RandomAccessIterator, Distance, T&&, value_type>(first, topPosition, position, eastl::forward<T>(value));
+	}
+
+
+	template <typename RandomAccessIterator, typename Distance, typename T, typename Compare, typename ValueType>
+	inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value, Compare compare)
+	{
+		for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+ (position > topPosition) && compare(*(first + parentPosition), value); + parentPosition = (position - 1) >> 1) + { + *(first + position) = eastl::forward(*(first + parentPosition)); // Swap the node with its parent. + position = parentPosition; + } + + *(first + position) = eastl::forward(value); + } + + + /// promote_heap + /// + /// Takes a Compare(a, b) function (or function object) which returns true if a < b. + /// For example, you could use the standard 'less' comparison object. + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, value, compare); + } + + + /// promote_heap + /// + /// Takes a Compare(a, b) function (or function object) which returns true if a < b. + /// For example, you could use the standard 'less' comparison object. + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, eastl::forward(value), compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // adjust_heap (internal function) + /////////////////////////////////////////////////////////////////////// + + template + void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value) + { + // We do the conventional approach of moving the position down to the + // bottom then inserting the value at the back and moving it up. + Distance childPosition = (2 * position) + 2; + + for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2) + { + if(*(first + childPosition) < *(first + (childPosition - 1))) // Choose the larger of the two children. + --childPosition; + *(first + position) = eastl::forward(*(first + childPosition)); // Swap positions with this child. + position = childPosition; + } + + if(childPosition == heapSize) // If we are at the very last index of the bottom... + { + *(first + position) = eastl::forward(*(first + (childPosition - 1))); + position = childPosition - 1; + } + + eastl::promote_heap(first, topPosition, position, eastl::forward(value)); + } + + /// adjust_heap + /// + /// Given a position that has just been vacated, this function moves + /// new values into that vacated position appropriately. The value + /// argument is an entry which will be inserted into the heap after + /// we move nodes into the positions that were vacated. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. 
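+	///
+	/// Example usage (an illustrative sketch; 'v' is an assumed eastl::vector<int> holding a
+	/// heap of size n, with the slot at index p just vacated):
+	///     eastl::adjust_heap(v.begin(), (ptrdiff_t)0, (ptrdiff_t)n, (ptrdiff_t)p, value);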
+ /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value)); + } + + + /// adjust_heap + /// + /// Given a position that has just been vacated, this function moves + /// new values into that vacated position appropriately. The value + /// argument is an entry which will be inserted into the heap after + /// we move nodes into the positions that were vacated. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value)); + } + + + template + void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value, Compare compare) + { + // We do the conventional approach of moving the position down to the + // bottom then inserting the value at the back and moving it up. + Distance childPosition = (2 * position) + 2; + + for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2) + { + if(compare(*(first + childPosition), *(first + (childPosition - 1)))) // Choose the larger of the two children. + --childPosition; + *(first + position) = eastl::forward(*(first + childPosition)); // Swap positions with this child. + position = childPosition; + } + + if(childPosition == heapSize) // If we are at the bottom... + { + *(first + position) = eastl::forward(*(first + (childPosition - 1))); + position = childPosition - 1; + } + + eastl::promote_heap(first, topPosition, position, eastl::forward(value), compare); + } + + /// adjust_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value), compare); + } + + + /// adjust_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value), compare); + } + + + /////////////////////////////////////////////////////////////////////// + // push_heap + /////////////////////////////////////////////////////////////////////// + + /// push_heap + /// + /// Adds an item to a heap (which is an array). The item necessarily + /// comes from the back of the heap (array). Thus, the insertion of a + /// new item in a heap is a two step process: push_back and push_heap. 
+ /// + /// Example usage: + /// vector heap; + /// + /// heap.push_back(3); + /// push_heap(heap.begin(), heap.end()); // Places '3' appropriately. + /// + template + inline void push_heap(RandomAccessIterator first, RandomAccessIterator last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(eastl::forward(*(last - 1))); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(last - first - 1), eastl::forward(tempBottom)); + } + + + /// push_heap + /// + /// This version is useful for cases where your object comparison is unusual + /// or where you want to have the heap store pointers to objects instead of + /// storing the objects themselves (often in order to improve cache coherency + /// while doing sorting). + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline void push_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(last - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(last - first - 1), tempBottom, compare); + } + + + + + /////////////////////////////////////////////////////////////////////// + // pop_heap + /////////////////////////////////////////////////////////////////////// + + /// pop_heap + /// + /// Removes the first item from the heap (which is an array), and adjusts + /// the heap so that the highest priority item becomes the new first item. + /// + /// Example usage: + /// vector heap; + /// + /// heap.push_back(2); + /// heap.push_back(3); + /// heap.push_back(1); + /// + /// pop_heap(heap.begin(), heap.end()); // Moves heap[0] to the back of the heap and adjusts the heap. + /// heap.pop_back(); // Remove value that was just at the top of the heap + /// + template + inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + value_type tempBottom(eastl::forward(*(last - 1))); + *(last - 1) = eastl::forward(*first); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward(tempBottom)); + } + + + + /// pop_heap + /// + /// This version is useful for cases where your object comparison is unusual + /// or where you want to have the heap store pointers to objects instead of + /// storing the objects themselves (often in order to improve cache coherency + /// while doing sorting). + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
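+	///
+	/// Example usage (an illustrative sketch, maintaining a min-heap via eastl::greater):
+	///     eastl::pop_heap(heap.begin(), heap.end(), eastl::greater<int>());
+	///     heap.pop_back(); // Remove the element that pop_heap moved to the back.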
+	///
+	template <typename RandomAccessIterator, typename Compare>
+	inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+	{
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+		value_type tempBottom(eastl::forward<value_type>(*(last - 1)));
+		*(last - 1) = eastl::forward<value_type>(*first);
+		eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+						  (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward<value_type>(tempBottom), compare);
+	}
+
+
+	///////////////////////////////////////////////////////////////////////
+	// make_heap
+	///////////////////////////////////////////////////////////////////////
+
+
+	/// make_heap
+	///
+	/// Given an array, this function converts it into heap format.
+	/// The complexity is O(n), where n is count of the range.
+	/// The input range is not required to be in any order.
+	///
+	template <typename RandomAccessIterator>
+	void make_heap(RandomAccessIterator first, RandomAccessIterator last)
+	{
+		// We do bottom-up heap construction as per Sedgewick. Such construction is O(n).
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+		const difference_type heapSize = last - first;
+
+		if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+		{
+			difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+			do{
+				--parentPosition;
+				value_type temp(eastl::forward<value_type>(*(first + parentPosition)));
+				eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+								  (first, parentPosition, heapSize, parentPosition, eastl::forward<value_type>(temp));
+			} while(parentPosition != 0);
+		}
+	}
+
+
+	template <typename RandomAccessIterator, typename Compare>
+	void make_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+	{
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+		typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+		const difference_type heapSize = last - first;
+
+		if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+		{
+			difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+			do{
+				--parentPosition;
+				value_type temp(eastl::forward<value_type>(*(first + parentPosition)));
+				eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+								  (first, parentPosition, heapSize, parentPosition, eastl::forward<value_type>(temp), compare);
+			} while(parentPosition != 0);
+		}
+	}
+
+
+	///////////////////////////////////////////////////////////////////////
+	// sort_heap
+	///////////////////////////////////////////////////////////////////////
+
+	/// sort_heap
+	///
+	/// After the application of this algorithm, the range it was applied to
+	/// is no longer a heap, though it will be a reverse heap (smallest first).
+	/// The item with the lowest priority will be first, and the highest last.
+	/// This is not a stable sort because the relative order of equivalent
+	/// elements is not necessarily preserved.
+	/// The range referenced must be valid; all pointers must be dereferenceable
+	/// and within the sequence the last position is reachable from the first
+	/// by incrementation.
+	/// The complexity is at most O(n * log(n)), where n is count of the range.
+	///
+	template <typename RandomAccessIterator>
+	inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last)
+	{
+		for(; (last - first) > 1; --last) // We simply use the heap to sort itself.
+ eastl::pop_heap(first, last); + } + + + /// sort_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + for(; (last - first) > 1; --last) // We simply use the heap to sort itself. + eastl::pop_heap(first, last, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // remove_heap + /////////////////////////////////////////////////////////////////////// + + /// remove_heap + /// + /// Removes an arbitrary entry from the heap and adjusts the heap appropriately. + /// This function is unlike pop_heap in that pop_heap moves the top item + /// to the back of the heap, whereas remove_heap moves an arbitrary item to + /// the back of the heap. + /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + template + inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(first + heapSize - 1)); + *(first + heapSize - 1) = *(first + position); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom); + } + + + /// remove_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + template + inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(first + heapSize - 1)); + *(first + heapSize - 1) = *(first + position); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // change_heap + /////////////////////////////////////////////////////////////////////// + + /// change_heap + /// + /// Given a value in the heap that has changed in priority, this function + /// adjusts the heap appropriately. The heap size remains unchanged after + /// this operation. + /// + template + inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + eastl::remove_heap(first, heapSize, position); + + value_type tempBottom(*(first + heapSize - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom); + } + + + /// change_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
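+	///
+	/// Example usage (an illustrative sketch; heap[i]'s priority was changed under 'compare'):
+	///     eastl::change_heap(heap.begin(), (ptrdiff_t)heap.size(), (ptrdiff_t)i, compare);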
+ /// + template + inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + eastl::remove_heap(first, heapSize, position, compare); + + value_type tempBottom(*(first + heapSize - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // is_heap_until + /////////////////////////////////////////////////////////////////////// + + /// is_heap_until + /// + template + inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last) + { + int counter = 0; + + for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1) + { + if(*first < *child) // We must use operator <, and are not allowed to use > or >= here. + return child; + first += counter; // counter switches between 0 and 1 every time through. + } + + return last; + } + + + /// is_heap_until + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + int counter = 0; + + for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1) + { + if(compare(*first, *child)) + return child; + first += counter; // counter switches between 0 and 1 every time through. + } + + return last; + } + + + + /////////////////////////////////////////////////////////////////////// + // is_heap + /////////////////////////////////////////////////////////////////////// + + /// is_heap + /// + /// This is a useful debugging algorithm for verifying that a random + /// access container is in heap format. + /// + template + inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last) + { + return (eastl::is_heap_until(first, last) == last); + } + + + /// is_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + return (eastl::is_heap_until(first, last, compare) == last); + } + + + // To consider: The following may be a faster implementation for most cases. + // + // template + // inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last) + // { + // if(((uintptr_t)(last - first) & 1) == 0) // If the range has an even number of elements... + // --last; + // + // RandomAccessIterator parent = first, child = (first + 1); + // + // for(; child < last; child += 2, ++parent) + // { + // if((*parent < *child) || (*parent < *(child + 1))) + // return false; + // } + // + // if((((uintptr_t)(last - first) & 1) == 0) && (*parent < *child)) + // return false; + // + // return true; + // } + + +} // namespace eastl + + +#endif // Header include guard + + + + diff --git a/external/EASTL/include/EASTL/initializer_list.h b/external/EASTL/include/EASTL/initializer_list.h new file mode 100644 index 00000000..f79b7162 --- /dev/null +++ b/external/EASTL/include/EASTL/initializer_list.h @@ -0,0 +1,104 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+//
+// This file #includes <initializer_list> if it's available, else it defines
+// its own version of std::initializer_list. It does not define eastl::initializer_list
+// because that would not provide any use, due to how the C++11 Standard works.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INITIALIZER_LIST_H
+#define EASTL_INITIALIZER_LIST_H
+
+
+#include <EABase/eabase.h>
+#include <EABase/eahave.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+#if defined(EA_HAVE_CPP11_INITIALIZER_LIST) // If the compiler can generate calls to std::initializer_list...
+
+	// The initializer_list type must be declared in the std namespace, as that's the
+	// namespace the compiler uses when generating code to use it.
+	EA_DISABLE_ALL_VC_WARNINGS()
+	#include <initializer_list>
+	EA_RESTORE_ALL_VC_WARNINGS()
+
+#else
+
+	// If you get an error here about initializer_list being already defined, then the EA_HAVE_CPP11_INITIALIZER_LIST define from <EABase/eahave.h> needs to be updated.
+	namespace std
+	{
+		// See the C++11 Standard, section 18.9.
+		template<class E>
+		class initializer_list
+		{
+		public:
+			typedef E         value_type;
+			typedef const E&  reference;
+			typedef const E&  const_reference;
+			typedef size_t    size_type;
+			typedef const E*  iterator;  // Must be const, as initializer_list (and its mpArray) is an immutable temp object.
+			typedef const E*  const_iterator;
+
+		private:
+			iterator  mpArray;
+			size_type mArraySize;
+
+			// This constructor is private, but the C++ compiler has the ability to call it, as per the C++11 Standard.
+			initializer_list(const_iterator pArray, size_type arraySize)
+			  : mpArray(pArray), mArraySize(arraySize) { }
+
+		public:
+			initializer_list() EA_NOEXCEPT // EA_NOEXCEPT requires a recent version of EABase.
+			  : mpArray(NULL), mArraySize(0) { }
+
+#if defined(EA_COMPILER_MSVC)
+			// MSVC generates constructor calls with two pointers instead of one pointer + size. The constructor is
+			// public.
+			// See: https://docs.microsoft.com/en-us/cpp/standard-library/initializer-list-class#initializer_list
+			initializer_list(const_iterator pFirst, const_iterator pLast) EA_NOEXCEPT
+			  : mpArray(pFirst), mArraySize(pLast - pFirst) { }
+#endif
+
+			size_type      size()  const EA_NOEXCEPT { return mArraySize; }
+			const_iterator begin() const EA_NOEXCEPT { return mpArray; }  // Must be const_iterator, as initializer_list (and its mpArray) is an immutable temp object.
+			const_iterator end()   const EA_NOEXCEPT { return mpArray + mArraySize; }
+		};
+
+
+		template<class T>
+		const T* begin(std::initializer_list<T> ilist) EA_NOEXCEPT
+		{
+			return ilist.begin();
+		}
+
+		template<class T>
+		const T* end(std::initializer_list<T> ilist) EA_NOEXCEPT
+		{
+			return ilist.end();
+		}
+	}
+
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch.h
new file mode 100644
index 00000000..4924a591
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch.h
@@ -0,0 +1,65 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// Include the architecture specific implementations +// +#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) + + #include "x86/arch_x86.h" + +#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) + + #include "arm/arch_arm.h" + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "arch_fetch_add.h" +#include "arch_fetch_sub.h" + +#include "arch_fetch_and.h" +#include "arch_fetch_xor.h" +#include "arch_fetch_or.h" + +#include "arch_add_fetch.h" +#include "arch_sub_fetch.h" + +#include "arch_and_fetch.h" +#include "arch_xor_fetch.h" +#include "arch_or_fetch.h" + +#include "arch_exchange.h" + +#include "arch_cmpxchg_weak.h" +#include "arch_cmpxchg_strong.h" + +#include "arch_load.h" +#include "arch_store.h" + +#include "arch_compiler_barrier.h" + +#include "arch_cpu_pause.h" + +#include "arch_memory_barrier.h" + +#include "arch_signal_fence.h" + +#include "arch_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h new file mode 100644 index 00000000..65771f89 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_add_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H */ diff --git 
a/external/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h new file mode 100644 index 00000000..df7ba35d --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_and_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h new file mode 100644 index 00000000..1005dc33 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16) + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0 
+#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \ + 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h new file mode 100644 index 00000000..5ce26386 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128) + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h new file mode 100644 index 00000000..0652469b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h @@ -0,0 +1,19 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_AVAILABLE 0 + +#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0 + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h new file mode 100644 index 00000000..e8c2d1d7 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_cpu_pause.h @@ -0,0 +1,25 @@ +///////////////////////////////////////////////////////////////////////////////// +// copyright (c) electronic arts inc. all rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_PAUSE() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_PAUSE) + #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h new file mode 100644 index 00000000..76003188 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_exchange.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h new file mode 100644 index 00000000..71907f70 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_add.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h new file mode 100644 index 00000000..f2b39a4c --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_and.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
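As the signature comment above states, every FETCH_ADD macro writes the value observed before the addition into ret. The std::atomic analogue, as a small self-contained illustration of that contract:

#include <atomic>
#include <cassert>

// fetch_add returns the value held *before* the addition, which is the same
// contract the EASTL_ARCH_ATOMIC_FETCH_ADD_* macros expose through 'ret'.
void fetch_add_contract()
{
	std::atomic<int> counter{10};
	int prev = counter.fetch_add(5, std::memory_order_relaxed);
	assert(prev == 10);
	assert(counter.load(std::memory_order_relaxed) == 15);
}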
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h new file mode 100644 index 00000000..dd6dd0db --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_or.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h new file mode 100644 index 00000000..ea63db73 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_sub.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h new file mode 100644 index 00000000..b41ad2d4 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_fetch_xor.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_load.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_load.h new file mode 100644 index 00000000..eea7cf49 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_load.h @@ -0,0 +1,125 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_8) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_16) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_32) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32) + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_64) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64) + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_128) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H */ diff --git 
a/external/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h new file mode 100644 index 00000000..c6cc6bfc --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_memory_barrier.h @@ -0,0 +1,47 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_MB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_MB) + #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_WMB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_WMB) + #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_RMB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_RMB) + #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h new file mode 100644 index 00000000..110326b4 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_or_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
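The three hooks in arch_memory_barrier.h split a full barrier (CPU_MB) into a store-store half (CPU_WMB) and a load-load half (CPU_RMB). A hardware-level message-passing sketch of how the two halves pair up; the globals are hypothetical, volatile stands in for real atomics, and this illustrates barrier placement rather than race-free C++:

extern volatile int g_payload; // hypothetical shared data
extern volatile int g_flag;    // hypothetical ready flag

void producer()
{
	g_payload = 42;
	EASTL_ARCH_ATOMIC_CPU_WMB(); // order the payload store before the flag store
	g_flag = 1;
}

void consumer()
{
	while (g_flag != 1) {}
	EASTL_ARCH_ATOMIC_CPU_RMB(); // order the flag load before the payload load
	int observed = g_payload;    // sees 42 once the flag is observed
	(void)observed;
}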
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h new file mode 100644 index 00000000..65b64fc2 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_signal_fence.h @@ -0,0 +1,21 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0 + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_store.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_store.h new file mode 100644 index 00000000..9a4112cb --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_store.h @@ -0,0 +1,113 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
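OR_FETCH differs from the earlier FETCH_OR only in what lands in ret: the value after the OR rather than the value before it. std::atomic offers no or_fetch, but the equivalence is easy to demonstrate (illustration only):

#include <atomic>
#include <cassert>

void or_fetch_vs_fetch_or()
{
	std::atomic<unsigned> bits{0x1};

	unsigned oldValue = bits.fetch_or(0x2);       // FETCH_OR contract: yields 0x1
	unsigned newValue = bits.fetch_or(0x4) | 0x4; // OR_FETCH contract: yields 0x7

	assert(oldValue == 0x1);
	assert(newValue == 0x7);
	assert(bits.load(std::memory_order_relaxed) == 0x7);
}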
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_8) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_16) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_32) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_64) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_128) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h new file mode 100644 index 00000000..20241b14 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_sub_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright 
(c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h new file mode 100644 index 00000000..676fbf19 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_thread_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h new file mode 100644 index 00000000..63548c22 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arch_xor_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
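A THREAD_FENCE is a standalone ordering point that is not tied to any single variable. The canonical use is pairing a release fence with an acquire fence across a relaxed flag; in std::atomic terms (a sketch of the semantics the hooks above expose):

#include <atomic>

std::atomic<int>  g_data{0};
std::atomic<bool> g_ready{false};

void publisher()
{
	g_data.store(42, std::memory_order_relaxed);
	std::atomic_thread_fence(std::memory_order_release); // THREAD_FENCE_RELEASE
	g_ready.store(true, std::memory_order_relaxed);
}

void subscriber()
{
	while (!g_ready.load(std::memory_order_relaxed)) {}
	std::atomic_thread_fence(std::memory_order_acquire); // THREAD_FENCE_ACQUIRE
	int value = g_data.load(std::memory_order_relaxed);  // guaranteed to be 42
	(void)value;
}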
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h
new file mode 100644
index 00000000..cc2ce522
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/**
+ * NOTE: We use this mapping
+ *
+ * ARMv7 Mapping 'trailing sync;':
+ *
+ * Load Relaxed  : ldr
+ * Load Acquire  : ldr; dmb ish
+ * Load Seq_Cst  : ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str; dmb ish
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * ARMv7 Mapping 'leading sync;':
+ *
+ * Load Relaxed  : ldr
+ * Load Acquire  : ldr; dmb ish
+ * Load Seq_Cst  : dmb ish; ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * NOTE:
+ *
+ * On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses
+ * a dmb instead of a control dependency + isb, in part to ensure the IRIW litmus test is
+ * satisfied. See EASTL/atomic.h for further explanation and a deep-dive.
+ *
+ * For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant.
+ * All ARMv7 approaches work on ARMv8, and this code path is only used by MSVC, which isn't used
+ * heavily. Most of the ARM code will end up going through clang or gcc since Microsoft ARM
+ * devices aren't that abundant.
+ */
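Spelling out one row of the 'trailing sync;' table: a seq_cst store on ARMv7 under GCC/Clang inline assembly. This is a sketch of what the mapping lowers to, not code from this diff:

// Store Seq_Cst under 'trailing sync;': dmb ish; str; dmb ish
inline void store_seq_cst_armv7(int* ptr, int val)
{
	__asm__ __volatile__("dmb ish" ::: "memory");                          // leading full barrier
	__asm__ __volatile__("str %1, [%0]" :: "r"(ptr), "r"(val) : "memory"); // the store itself
	__asm__ __volatile__("dmb ish" ::: "memory");                          // trailing full barrier
}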
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+	#if EA_PLATFORM_PTR_SIZE == 8
+		#define EASTL_ARCH_ATOMIC_HAS_128BIT
+	#endif
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_arm_load.h"
+#include "arch_arm_store.h"
+
+#include "arch_arm_memory_barrier.h"
+
+#include "arch_arm_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
new file mode 100644
index 00000000..e3b79b84
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
@@ -0,0 +1,156 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+	/**
+	 * NOTE:
+	 *
+	 * Even 8-byte aligned 64-bit memory accesses on ARM32 are not
+	 * guaranteed to be atomic on all ARM32 CPUs; they are only guaranteed on
+	 * CPUs with the LPAE extension. We need to use an
+	 * ldrexd instruction in order to ensure no shearing is observed
+	 * for all ARM32 processors.
+	 */
+	#if defined(EA_PROCESSOR_ARM32)
+
+		#define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \
+			ret = __ldrexd((ptr))
+
+	#endif
+
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_N(integralType, bits, type, ret, ptr) \
+		{ \
+			integralType retIntegral; \
+			retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+			\
+			ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+		}
+
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int8, 8, type, ret, ptr)
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int16, 16, type, ret, ptr)
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int32, 32, type, ret, ptr)
+
+
+	#if defined(EA_PROCESSOR_ARM32)
+
+
+		#define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+			{ \
+				__int64 loadRet64; \
+				EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
+				\
+				ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
+			}
+
+	#else
+
+		#define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+			EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int64, 64, type, ret, ptr)
+
+	#endif
+
+
+	/**
+	 * NOTE:
+	 *
+	 * The ARM documentation states the following:
+	 * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
+	 *
+	 * Thus we must ensure the store succeeds in order for the load to be observed as atomic.
+ * Thus we must use the full cmpxchg in order to do a proper atomic load. + */ + #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + bool cmpxchgRetBool; \ + ret = *(ptr); \ + do \ + { \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \ + ptr, &(ret), ret); \ + } while (!cmpxchgRetBool); \ + } + + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, RELAXED) + + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, ACQUIRE) + + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h new file mode 100644 index 00000000..44dc991d --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h @@ -0,0 +1,97 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_COMPILER_MSVC) && !defined(EA_COMPILER_CLANG_CL)
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISH _ARM_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM_BARRIER_ISH
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISH _ARM64_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM64_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM64_BARRIER_ISHLD
+
+ #endif
+
+
+ /**
+ * NOTE:
+ *
+ * While it makes no sense for a hardware memory barrier to not imply a compiler barrier,
+ * the MSVC docs do not explicitly state that, so it is better to be safe than sorry chasing down
+ * hard-to-find bugs due to the compiler deciding to reorder things.
+ */
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ __dmb(option); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#elif defined(EA_COMPILER_GNUC) || defined(__clang__)
+
+ #define EASTL_ARM_DMB_ISH ish
+
+ #define EASTL_ARM_DMB_ISHST ishst
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISHLD ish
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISHLD ishld
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ __asm__ __volatile__ ("dmb " EA_STRINGIFY(option) ::: "memory")
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_MB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISH)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHST)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHLD)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
new file mode 100644
index 00000000..ab53b9d4
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
@@ -0,0 +1,142 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_N(integralType, bits, type, ptr, val) \ + EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val))) + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int8, 8, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int16, 16, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int32, 32, type, ptr, val) + + + #if defined(EA_PROCESSOR_ARM64) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int64, 64, type, ptr, val) + + #endif + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELAXED) + + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELEASE) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) ; \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, SEQ_CST) + + + #if defined(EA_PROCESSOR_ARM32) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, retExchange64, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, retExchange64, ptr, val); \ + } + + #define 
EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, retExchange64, ptr, val); \ + } + + + #elif defined(EA_PROCESSOR_ARM64) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + + #endif + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h new file mode 100644 index 00000000..391c64e0 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h @@ -0,0 +1,37 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EA_COMPILER_MSVC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CPU_MB() + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h new file mode 100644 index 00000000..77c383ab --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86.h @@ -0,0 +1,158 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * x86 && x64 Mappings
+ *
+ * Load Relaxed : MOV
+ * Load Acquire : MOV; COMPILER_BARRIER;
+ * Load Seq_Cst : MOV; COMPILER_BARRIER;
+ *
+ * Store Relaxed : MOV
+ * Store Release : COMPILER_BARRIER; MOV;
+ * Store Seq_Cst : LOCK XCHG : MOV; MFENCE;
+ *
+ * Relaxed Fence :
+ * Acquire Fence : COMPILER_BARRIER
+ * Release Fence : COMPILER_BARRIER
+ * Acq_Rel Fence : COMPILER_BARRIER
+ * Seq_Cst Fence : MFENCE
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+#if (defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+#elif defined(EA_COMPILER_MSVC)
+ #if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+ #endif
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * On 32-bit x86 CPUs (Intel Pentium and newer, AMD K5 and newer,
+ * and any i586 class of x86 CPU), only a 64-bit cmpxchg,
+ * known as cmpxchg8b, is supported.
+ *
+ * On this class of cpus we can guarantee that 64-bit loads/stores are
+ * also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions.
+ *
+ * We support all other atomic operations
+ * on compilers that only provide this 64-bit cmpxchg instruction
+ * by wrapping them around the 64-bit cmpxchg8b instruction.
+ */
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _64)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * 64-bit x64 CPUs support only a 128-bit cmpxchg, known as cmpxchg16b.
+ *
+ * We support all other atomic operations by wrapping them around
+ * the 128-bit cmpxchg16b instruction.
+ *
+ * 128-bit loads are only atomic when using the cmpxchg16b instruction.
+ * SSE 128-bit loads are not guaranteed to be atomic, even though some CPUs,
+ * such as AMD Ryzen or Intel SandyBridge, make them atomic.
+ */
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe the current value of *(ptr), in which case the cmpxchg fails and */ \
+ /* the atomically observed value is returned, or the non-atomic load happened to read the */ \
+ /* current value of *(ptr) and the cmpxchg succeeds; thus we can optimistically start with a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_x86_fetch_add.h"
+#include "arch_x86_fetch_sub.h"
+
+#include "arch_x86_fetch_and.h"
+#include "arch_x86_fetch_xor.h"
+#include "arch_x86_fetch_or.h"
+
+#include "arch_x86_add_fetch.h"
+#include "arch_x86_sub_fetch.h"
+
+#include "arch_x86_and_fetch.h"
+#include "arch_x86_xor_fetch.h"
+#include "arch_x86_or_fetch.h"
+
+#include "arch_x86_exchange.h"
+
+#include "arch_x86_cmpxchg_weak.h"
+#include "arch_x86_cmpxchg_strong.h"
+
+#include "arch_x86_memory_barrier.h"
+
+#include "arch_x86_thread_fence.h"
+
+#include "arch_x86_load.h"
+#include "arch_x86_store.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
new file mode 100644
index 00000000..7b77528e
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) + (val)) + + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) + (val)) + + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h new file mode 100644 index 00000000..05831636 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) & (val)) + + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) & (val)) + + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, 
RELEASE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h new file mode 100644 index 00000000..1968e9ab --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h @@ -0,0 +1,69 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \ + { \ + /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. 
*/ \ + __asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \ + "sete %3" /* If ZF == 1, set the return value to 1 */ \ + /* Output Operands */ \ + : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \ + "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \ + "=rm"((ret)) \ + /* Input Operands */ \ + : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \ + "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \ + /* Clobbers */ \ + : "memory", "cc"); \ + } + + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h new file mode 100644 index 00000000..61a126c1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h @@ -0,0 +1,52 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h new file mode 100644 index 00000000..b1de7d83 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h @@ -0,0 +1,91 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = (val)
+
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_DEFAULT_INIT(bool, cmpxchgRet); \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe the current value of *(ptr), in which case the cmpxchg fails and */ \
+ /* the atomically observed value is returned, or the non-atomic load happened to read the */ \
+ /* current value of *(ptr) and the cmpxchg succeeds; thus we can optimistically start with a non-atomic load.
*/ \ + ret = *(ptr); \ + do \ + { \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \ + } while (!cmpxchgRet); \ + } + + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELAXED) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQUIRE) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELEASE) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQ_REL) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h new file mode 100644 index 00000000..e816af9b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128(type, 
ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h new file mode 100644 index 00000000..ff27b1a2 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + 
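The MSVC section above and the GNUC/clang section that follows share one shape: every read-modify-write that the compiler gives no intrinsic for is lowered to a compare-and-swap loop, with PRE_COMPUTE_DESIRED computing the new value from the last observed one. As a rough illustrative sketch of that shape (not part of the patch), here is the FETCH_AND case written with std::atomic for readability; the function name fetch_and_via_cas is hypothetical, and the memory order is fixed at the seq_cst default, whereas the macros parameterize it:

#include <atomic>
#include <cstdint>

// Sketch of the CAS loop that EASTL_ARCH_ATOMIC_X86_OP_64_IMPL expands to
// for FETCH_AND: load the current value, compute observed & val, and retry
// the compare-exchange until no other thread has raced in between.
inline uint64_t fetch_and_via_cas(std::atomic<uint64_t>& a, uint64_t val)
{
    uint64_t observed = a.load(std::memory_order_relaxed);
    // compare_exchange_strong reloads 'observed' with the current value on failure.
    while (!a.compare_exchange_strong(observed, observed & val))
    {
    }
    return observed; // fetch_and returns the previously stored value
}

The same loop built on cmpxchg16b rather than cmpxchg8b is what the 128-bit section below generates; the separate POST_COMPUTE_RET hook exists so that the AND_FETCH/OR_FETCH/XOR_FETCH variants can return the newly computed value instead of the previously observed one.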
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h new file mode 100644 index 00000000..8627d3a2 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h new file mode 100644 index 00000000..14b43f90 --- /dev/null 
+++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* 
EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h new file mode 100644 index 00000000..666df8bf --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + 
#define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h new file mode 100644 index 00000000..644a2a17 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h @@ -0,0 +1,164 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + /** + * NOTE: + * + * Since the cmpxchg 128-bit inline assembly does a sete in the asm to set the return boolean, + * it doesn't get dead-store removed even though we don't care about the success of the + * cmpxchg since the compiler cannot reason about what is inside asm blocks. + * Thus this variant just does the minimum required to do an atomic load. + */ +#define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \ + \ + /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. 
*/ \ + __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \ + /* Output Operands */ \ + : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \ + "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \ + /* Input Operands */ \ + : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \ + "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \ + /* Clobbers */ \ + : "memory", "cc"); \ + } + + +#define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED) + +#define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE) + +#define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST) + +#elif defined(EA_COMPILER_MSVC) + + + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019 + + #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \ + { \ + integralType retIntegral; \ + retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + #else + + #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \ + { \ + integralType retIntegral; \ + retIntegral = (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + #endif + + + #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected{0, 0}; \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \ + \ + bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \ + } + + + #define EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int8, 8, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int16, 16, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int32, 32, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int64, 64, type, ret, ptr) + + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED) + + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define 
EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE) + + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST) + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h new file mode 100644 index 00000000..7bad141f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_MB() +// +#if defined(EA_COMPILER_MSVC) + + /** + * NOTE: + * While it makes no sense for a hardware memory barrier to not imply a compiler barrier. + * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down + * hard to find bugs due to the compiler deciding to reorder things. + */ + + #if 1 + + // 4459 : declaration of 'identifier' hides global declaration + // 4456 : declaration of 'identifier' hides previous local declaration + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + { \ + EA_DISABLE_VC_WARNING(4459 4456); \ + volatile long _; \ + _InterlockedExchangeAdd(&_, 0); \ + EA_RESTORE_VC_WARNING(); \ + } + + #else + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + _mm_mfence(); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #endif + +#elif defined(__clang__) || defined(EA_COMPILER_GNUC) + + /** + * NOTE: + * + * mfence orders all loads/stores to/from all memory types. + * We only care about ordinary cacheable memory so lighter weight locked instruction + * is far faster than a mfence to get a full memory barrier. + * lock; addl against the top of the stack is good because: + * distinct for every thread so prevents false sharing + * that cacheline is most likely cache hot + * + * We intentionally do it below the stack pointer to avoid false RAW register dependencies, + * in cases where the compiler reads from the stack pointer after the lock; addl instruction + * + * Accounting for Red Zones or Cachelines doesn't provide extra benefit. 
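+	 *
+	 * The same reasoning is why EASTL_ARCH_ATOMIC_CPU_WMB() and EASTL_ARCH_ATOMIC_CPU_RMB()
+	 * further below reduce to plain compiler barriers: x86's TSO memory model never reorders
+	 * cacheable stores with stores or loads with loads, so only the full StoreLoad barrier
+	 * needs a real instruction.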
+ */ + + #if defined(EA_PROCESSOR_X86) + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("lock; addl $0, -4(%%esp)" ::: "memory", "cc") + + #elif defined(EA_PROCESSOR_X86_64) + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("lock; addl $0, -8(%%rsp)" ::: "memory", "cc") + + #else + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("mfence" ::: "memory") + + #endif + + +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_WMB() +// +#define EASTL_ARCH_ATOMIC_CPU_WMB() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_RMB() +// +#define EASTL_ARCH_ATOMIC_CPU_RMB() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h new file mode 100644 index 00000000..42f7d61f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) | (val)) + + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + #define 
EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) | (val)) + + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h new file mode 100644 index 00000000..31655c3b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h @@ -0,0 +1,171 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) + + + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019 + + #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \ + EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val))) + + #else + + #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \ + { \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))) = valIntegral; \ + } + + #endif + + + #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int8, 8, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int16, 16, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int32, 32, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int64, 64, type, ptr, val) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED) + + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + { \ + type exchange8; EA_UNUSED(exchange8); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, exchange8, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + { \ + type exchange16; EA_UNUSED(exchange16); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, exchange16, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + { \ + type exchange32; 
EA_UNUSED(exchange32); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, exchange32, ptr, val); \ + } + + + /** + * NOTE: + * + * Since 64-bit exchange is wrapped around a cmpxchg8b on 32-bit x86, it is + * faster to just do a mov; mfence. + */ + #if defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + + #elif defined(EA_PROCESSOR_X86_64) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + { \ + type exchange64; EA_UNUSED(exchange64); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, exchange64, ptr, val); \ + } + + + #endif + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE) + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h new file mode 100644 index 00000000..a1d09329 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) - (val)) + + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) - (val)) + + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h new file mode 100644 index 00000000..183c7f3a --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h @@ -0,0 +1,42 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EA_COMPILER_MSVC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#endif + + +#if defined(EA_COMPILER_MSVC) || defined(__clang__) || defined(EA_COMPILER_GNUC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CPU_MB() + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h new file mode 100644 index 00000000..a5b62c3b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic.h b/external/EASTL/include/EASTL/internal/atomic/atomic.h new file mode 100644 index 00000000..7dcd10f7 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic.h @@ -0,0 +1,260 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_H +#define EASTL_ATOMIC_INTERNAL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include +#include +#include +#include + +#include "atomic_macros.h" +#include "atomic_casts.h" + +#include "atomic_memory_order.h" +#include "atomic_asserts.h" + +#include "atomic_size_aligned.h" +#include "atomic_base_width.h" + +#include "atomic_integral.h" + +#include "atomic_pointer.h" + + +///////////////////////////////////////////////////////////////////////////////// + + +/** + * NOTE: + * + * All of the actual implementation is done via the ATOMIC_MACROS in the compiler or arch sub folders. + * The C++ code is merely boilerplate around these macros that actually implement the atomic operations. + * The C++ boilerplate is also hidden behind macros. + * This may seem more complicated but this is all meant to reduce copy-pasting and to ensure all operations + * all end up going down to one macro that does the actual implementation. + * The reduced code duplication makes it easier to verify the implementation and reason about it. + * Ensures we do not have to re-implement the same code for compilers that do not support generic builtins such as MSVC. + * Ensures for compilers that have separate intrinsics for different widths, that C++ boilerplate isn't copy-pasted leading to programmer errors. + * Ensures if we ever have to implement a new platform, only the low-level leaf macros have to be implemented, everything else will be generated for you. + */ + + +namespace eastl +{ + + +namespace internal +{ + + +// 'class' : multiple assignment operators specified +EA_DISABLE_VC_WARNING(4522); + +// misaligned atomic operation may incur significant performance penalty +// The above warning is emitted in earlier versions of clang incorrectly. +// All eastl::atomic objects are size aligned. +// This is static and runtime asserted. +// Thus we disable this warning. 
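The macro layering described in the NOTE above is easier to see in miniature. The following is a toy analogue with invented TOY_* names, not EASTL's real macros: the public entry point token-pastes operation, memory order, and width into exactly one leaf macro, so porting to a new platform means supplying only the leaves.

```cpp
#include <cstdint>
#include <cstdio>

// Leaf macros: the only pieces a hypothetical new platform would have to supply.
#define TOY_LOAD_RELAXED_32(type, ret, ptr) (ret) = *(ptr)
#define TOY_LOAD_ACQUIRE_32(type, ret, ptr) (ret) = *(ptr) /* plus an acquire fence on a real target */

// Generic glue: token-paste operation, order, and width into one leaf name,
// the same trick EA_PREPROCESSOR_JOIN performs throughout these headers.
#define TOY_JOIN_IMPL(a, b) a##b
#define TOY_JOIN(a, b) TOY_JOIN_IMPL(a, b)
#define TOY_LOAD(Order, bits, type, ret, ptr) \
	TOY_JOIN(TOY_JOIN(TOY_LOAD_, Order), TOY_JOIN(_, bits))(type, ret, ptr)

int main()
{
	uint32_t value = 42;
	uint32_t out = 0;
	TOY_LOAD(ACQUIRE, 32, uint32_t, out, &value); // expands to TOY_LOAD_ACQUIRE_32(uint32_t, out, &value)
	std::printf("%u\n", out);
	return 0;
}
```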
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment); + + + template + struct is_atomic_lockfree_size + { + static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = false || + #if defined(EASTL_ATOMIC_HAS_8BIT) + sizeof(T) == 1 || + #endif + #if defined(EASTL_ATOMIC_HAS_16BIT) + sizeof(T) == 2 || + #endif + #if defined(EASTL_ATOMIC_HAS_32BIT) + sizeof(T) == 4 || + #endif + #if defined(EASTL_ATOMIC_HAS_64BIT) + sizeof(T) == 8 || + #endif + #if defined(EASTL_ATOMIC_HAS_128BIT) + sizeof(T) == 16 || + #endif + false; + }; + + + template + struct is_user_type_suitable_for_primary_template + { + static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = eastl::internal::is_atomic_lockfree_size::value; + }; + + + template + using select_atomic_inherit_0 = typename eastl::conditional || eastl::internal::is_user_type_suitable_for_primary_template::value, + eastl::internal::atomic_base_width, /* True */ + eastl::internal::atomic_invalid_type /* False */ + >::type; + + template + using select_atomic_inherit = select_atomic_inherit_0; + + +} // namespace internal + + +#define EASTL_ATOMIC_CLASS_IMPL(type, base, valueType, differenceType) \ + private: \ + \ + EASTL_ATOMIC_STATIC_ASSERT_TYPE(type); \ + \ + using Base = base; \ + \ + public: \ + \ + typedef valueType value_type; \ + typedef differenceType difference_type; \ + \ + public: \ + \ + static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size::value; \ + \ + public: /* deleted ctors && assignment operators */ \ + \ + atomic(const atomic&) EA_NOEXCEPT = delete; \ + \ + atomic& operator=(const atomic&) EA_NOEXCEPT = delete; \ + atomic& operator=(const atomic&) volatile EA_NOEXCEPT = delete; \ + \ + public: /* ctors */ \ + \ + EA_CONSTEXPR atomic(type desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + EA_CONSTEXPR atomic() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v) = default; \ + \ + public: \ + \ + bool is_lock_free() const EA_NOEXCEPT \ + { \ + return eastl::internal::is_atomic_lockfree_size::value; \ + } \ + \ + bool is_lock_free() const volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type); \ + return false; \ + } + + +#define EASTL_ATOMIC_USING_ATOMIC_BASE(type) \ + public: \ + \ + using Base::operator=; \ + using Base::store; \ + using Base::load; \ + using Base::exchange; \ + using Base::compare_exchange_weak; \ + using Base::compare_exchange_strong; \ + \ + public: \ + \ + operator type() const volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + operator type() const EA_NOEXCEPT \ + { \ + return load(eastl::memory_order_seq_cst); \ + } + + +#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \ + public: \ + \ + using Base::fetch_add; \ + using Base::add_fetch; \ + \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + \ + using Base::fetch_and; \ + using Base::and_fetch; \ + \ + using Base::fetch_or; \ + using Base::or_fetch; \ + \ + using Base::fetch_xor; \ + using Base::xor_fetch; \ + \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; \ + using Base::operator&=; \ + using Base::operator|=; \ + using Base::operator^=; + + +#define EASTL_ATOMIC_USING_ATOMIC_POINTER() \ + public: \ + \ + using Base::fetch_add; \ + using Base::add_fetch; \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; + + 
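Concretely, the EASTL_ATOMIC_CLASS_IMPL / USING macros above generate the order-tagged member overloads that the specializations below expose. A minimal usage sketch (assuming a target where uint32_t passes the is_atomic_lockfree_size gate, which holds on any platform with 32-bit atomics):

```cpp
#include <EASTL/atomic.h>
#include <cstdint>

eastl::atomic<uint32_t> gReady{0};
uint32_t gPayload = 0; // plain data published through gReady

void producer()
{
	gPayload = 42;
	// Uses the generated store(T, memory_order_release_s) overload.
	gReady.store(1, eastl::memory_order_release);
}

bool consumer()
{
	// The acquire load pairs with the release store above, so once the
	// flag reads 1, the write to gPayload is guaranteed visible.
	if (gReady.load(eastl::memory_order_acquire) == 1)
		return gPayload == 42;
	return false;
}
```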
+template <typename T, typename = void>
+struct atomic : protected eastl::internal::select_atomic_inherit<T>
+{
+	EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::select_atomic_inherit<T>, T, T)
+
+	EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+};
+
+
+template <typename T>
+struct atomic<T, eastl::enable_if_t<eastl::is_integral_v<T> && !eastl::is_same_v<bool, T>>> : protected eastl::internal::atomic_integral_width<T>
+{
+	EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::atomic_integral_width<T>, T, T)
+
+	EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+
+	EASTL_ATOMIC_USING_ATOMIC_INTEGRAL()
+};
+
+
+template <typename T>
+struct atomic<T*> : protected eastl::internal::atomic_pointer_width<T*>
+{
+	EASTL_ATOMIC_CLASS_IMPL(T*, eastl::internal::atomic_pointer_width<T*>, T*, ptrdiff_t)
+
+	EASTL_ATOMIC_USING_ATOMIC_BASE(T*)
+
+	EASTL_ATOMIC_USING_ATOMIC_POINTER()
+};
+
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
+
+} // namespace eastl
+
+#endif /* EASTL_ATOMIC_INTERNAL_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_asserts.h b/external/EASTL/include/EASTL/internal/atomic/atomic_asserts.h
new file mode 100644
index 00000000..9324a479
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_asserts.h
@@ -0,0 +1,75 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+#define EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+#define EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type) \
+	static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : volatile eastl::atomic<T> is not what you expect! Read the docs in EASTL/atomic.h! Use the memory orders to access the atomic object!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(type) \
+	static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : invalid memory order for the given operation!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE(type) \
+	/* User Provided T must not be cv qualified */ \
+	static_assert(!eastl::is_const<type>::value, "eastl::atomic<T> : Template Typename T cannot be const!"); \
+	static_assert(!eastl::is_volatile<type>::value, "eastl::atomic<T> : Template Typename T cannot be volatile! Use the memory orders to access the underlying type for the guarantees you need."); \
+	/* T must satisfy StandardLayoutType */ \
+	static_assert(eastl::is_standard_layout<type>::value, "eastl::atomic<T> : Must have standard layout!"); \
+	/* T must be TriviallyCopyable but it does not have to be TriviallyConstructible */ \
+	static_assert(eastl::is_trivially_copyable<type>::value, "eastl::atomic<T> : Template Typename T must be trivially copyable!"); \
+	static_assert(eastl::is_copy_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be copy constructible!"); \
+	static_assert(eastl::is_move_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be move constructible!"); \
+	static_assert(eastl::is_copy_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be copy assignable!"); \
+	static_assert(eastl::is_move_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be move assignable!"); \
+	static_assert(eastl::is_trivially_destructible<type>::value, "eastl::atomic<T> : Must be trivially destructible!"); \
+	static_assert(eastl::internal::is_atomic_lockfree_size<type>::value, "eastl::atomic<T> : Template Typename T must be a lockfree size!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(type) \
+	static_assert(eastl::is_object<type>::value, "eastl::atomic<T> : Template Typename T must be an object type!");
+
+#define EASTL_ATOMIC_ASSERT_ALIGNED(alignment) \
+	EASTL_ASSERT((alignment & (alignment - 1)) == 0); \
+	EASTL_ASSERT((reinterpret_cast<uintptr_t>(this) & (alignment - 1)) == 0)
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+	template <typename T>
+	struct atomic_invalid_type
+	{
+		/**
+		 * class Test { int i; int j; int k; }; sizeof(Test) == 96 bits
+		 *
+		 * std::atomic allows non-primitive types to be used for the template type.
+		 * This causes the api to degrade to locking for types that cannot fit into the lockfree size
+		 * of the target platform such as std::atomic<Test> leading to performance traps.
+		 *
+		 * If this static_assert() fires, it means your template type T is larger than any atomic instruction
+		 * supported on the given platform.
+		 */
+
+		static_assert(!eastl::is_same<T, T>::value, "eastl::atomic<T> : invalid template type T!");
+	};
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_base_width.h b/external/EASTL/include/EASTL/internal/atomic/atomic_base_width.h
new file mode 100644
index 00000000..1a66d8ae
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_base_width.h
@@ -0,0 +1,354 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+#define EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// The above warning is emitted in earlier versions of clang incorrectly.
+// All eastl::atomic<T> objects are size aligned.
+// This is static and runtime asserted.
+// Thus we disable this warning.
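Hand-expanding one of the EASTL_ATOMIC_*_FUNC_IMPL macros that follow shows the idea: perform the atomic access on the fixed-width storage type, then type-pun the bits back to T. This is a simplified standalone sketch, with the GCC/Clang __atomic_load_n builtin standing in for the dispatched compiler/arch macro:

```cpp
#include <cstdint>
#include <cstring>

struct Pixel { uint16_t x; uint16_t y; }; // trivially copyable, exactly 4 bytes
static_assert(sizeof(Pixel) == sizeof(uint32_t), "pun requires equal sizes");

// Hand-expanded analogue of EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_ACQUIRE_, 32).
Pixel load_acquire(const Pixel* ptr)
{
	// Stands in for the dispatched EASTL_ATOMIC_LOAD_ACQUIRE_32 leaf macro.
	uint32_t retVal = __atomic_load_n(reinterpret_cast<const uint32_t*>(ptr), __ATOMIC_ACQUIRE);

	// EASTL_ATOMIC_TYPE_PUN_CAST: a memcpy-based pun, legal because Pixel is
	// trivially copyable and the same size as the storage type.
	Pixel ret;
	std::memcpy(&ret, &retVal, sizeof(ret));
	return ret;
}
```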
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment); + + + template + struct atomic_base_width; + + /** + * NOTE: + * + * T does not have to be trivially default constructible but it still + * has to be a trivially copyable type for the primary atomic template. + * Thus we must type pun into whatever storage type of the given fixed width + * the platform designates. This ensures T does not have to be trivially constructible. + */ + +#define EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) \ + EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_FIXED_WIDTH_TYPE_, bits) + + +#define EASTL_ATOMIC_STORE_FUNC_IMPL(op, bits) \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + fixedWidthDesired) + + +#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress())); \ + return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal); + + +#define EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(op, bits) \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + fixedWidthDesired); \ + return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal); + + +#define EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(op, bits) \ + EASTL_ATOMIC_DEFAULT_INIT(bool, retVal); \ + EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \ + EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \ + retVal, \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \ + EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), &expected), \ + fixedWidthDesired); \ + return retVal; + + +#define EASTL_ATOMIC_BASE_OP_JOIN(op, Order) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, op), Order) + + +#define EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(funcName, cmpxchgOp, bits) \ + using Base::funcName; \ + \ + bool funcName(T& expected, T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_), bits); \ + } \ + \ 
+ bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_relaxed_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_release_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acq_rel_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acq_rel_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_SEQ_CST_), bits); \ + } + +#define EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \ + EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_weak, CMPXCHG_WEAK, bits) + +#define EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \ + EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_strong, CMPXCHG_STRONG, bits) + + +#define EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(bytes, bits) \ + template \ + struct atomic_base_width : public atomic_size_aligned \ + { \ + private: \ + \ + static_assert(EA_ALIGN_OF(atomic_size_aligned) == bytes, "eastl::atomic must be sizeof(T) aligned!"); \ + static_assert(EA_ALIGN_OF(atomic_size_aligned) == sizeof(T), "eastl::atomic must be sizeof(T) aligned!"); \ + using Base = atomic_size_aligned; \ + \ + public: /* ctors */ \ + \ + EA_CONSTEXPR atomic_base_width(T desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + EA_CONSTEXPR atomic_base_width() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v) = default; \ + \ + atomic_base_width(const atomic_base_width&) EA_NOEXCEPT = delete; \ + \ + 
public: /* store */ \ + \ + using Base::store; \ + \ + void store(T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELAXED_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELEASE_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \ + } \ + \ + public: /* load */ \ + \ + using Base::load; \ + \ + T load() const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_RELAXED_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_ACQUIRE_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \ + } \ + \ + public: /* exchange */ \ + \ + using Base::exchange; \ + \ + T exchange(T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELAXED_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQUIRE_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELEASE_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQ_REL_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \ + } \ + \ + public: /* compare_exchange_weak */ \ + \ + EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \ + \ + public: /* compare_exchange_strong */ \ + \ + EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \ + \ + public: /* assignment operator */ \ + \ + using Base::operator=; \ + \ + T operator=(T desired) EA_NOEXCEPT \ + { \ + store(desired, eastl::memory_order_seq_cst); \ + return desired; \ + } \ + \ + atomic_base_width& operator=(const atomic_base_width&) EA_NOEXCEPT = delete; \ + atomic_base_width& operator=(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \ + \ + }; + + +#if defined(EASTL_ATOMIC_HAS_8BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(1, 8) +#endif + +#if defined(EASTL_ATOMIC_HAS_16BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(2, 16) +#endif + +#if defined(EASTL_ATOMIC_HAS_32BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(4, 32) +#endif + +#if defined(EASTL_ATOMIC_HAS_64BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(8, 64) +#endif + +#if defined(EASTL_ATOMIC_HAS_128BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(16, 128) +#endif + +EA_RESTORE_VC_WARNING(); + +EA_RESTORE_CLANG_WARNING(); + + +} // namespace internal + + +} // namespace eastl + +#endif /* EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H */ diff --git 
a/external/EASTL/include/EASTL/internal/atomic/atomic_casts.h b/external/EASTL/include/EASTL/internal/atomic/atomic_casts.h new file mode 100644 index 00000000..54b9ed27 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_casts.h @@ -0,0 +1,190 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_CASTS_H +#define EASTL_ATOMIC_INTERNAL_CASTS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include + + +#include + + +namespace eastl +{ + + +namespace internal +{ + + +template +EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + + return reinterpret_cast(ptr); +} + + +/** + * NOTE: + * + * Some compiler intrinsics do not operate on pointer types thus + * doing atomic operations on pointers must be casted to the suitable + * sized unsigned integral type. + * + * Some compiler intrinsics aren't generics and thus structs must also + * be casted to the appropriate sized unsigned integral type. + * + * Atomic operations on an int* might have to be casted to a uint64_t on + * a platform with 8-byte pointers as an example. + * + * Also doing an atomic operation on a struct, we must ensure that we observe + * the whole struct as one atomic unit with no shearing between the members. + * A load of a struct with two uint32_t members must be one uint64_t load, + * not two separate uint32_t loads, thus casted to the suitable sized + * unsigned integral type. + */ +template +EASTL_FORCE_INLINE volatile Integral* AtomicVolatileIntegralCast(T* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + static_assert(eastl::is_integral::value, "eastl::atomic : Integral cast must cast to an Integral type!"); + static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic : Integral and T must be same size for casting!"); + + return reinterpret_cast(ptr); +} + +template +EASTL_FORCE_INLINE Integral* AtomicIntegralCast(T* ptr) EA_NOEXCEPT +{ + static_assert(eastl::is_integral::value, "eastl::atomic : Integral cast must cast to an Integral type!"); + static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic : Integral and T must be same size for casting!"); + + return reinterpret_cast(ptr); +} + + +/** + * NOTE: + * + * These casts are meant to be used with unions or structs of larger types that must be casted + * down to the smaller integral types. Like with 128-bit atomics and msvc intrinsics. + * + * struct Foo128 { __int64 array[2]; }; can be casted to a __int64* + * since a poiter to Foo128 is a pointer to the first member. 
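+ *
+ * This is safe because Foo128 is a standard-layout type: a pointer to a
+ * standard-layout struct is pointer-interconvertible with its first member,
+ * so MSVC's 128-bit intrinsics can operate on the struct through that __int64*.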
+ */ +template +EASTL_FORCE_INLINE volatile ToType* AtomicVolatileTypeCast(FromType* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + + return reinterpret_cast(ptr); +} + +template +EASTL_FORCE_INLINE ToType* AtomicTypeCast(FromType* ptr) EA_NOEXCEPT +{ + return reinterpret_cast(ptr); +} + + +/** + * NOTE: + * + * This is a compiler guaranteed safe type punning. + * This is useful when dealing with user defined structs. + * struct Test { uint32_t; unint32_t; }; + * + * Example: + * uint64_t atomicLoad = *((volatile uint64_t*)&Test); + * Test load = AtomicTypePunCast(atomicLoad); + * + * uint64_t comparand = AtomicTypePunCast(Test); + * cmpxchg(&Test, comparand, desired); + * + * This can be implemented in many different ways depending on the compiler such + * as thru a union, memcpy, reinterpret_cast(atomicLoad), etc. + */ +template , int> = 0> +EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT +{ + static_assert(sizeof(Pun) == sizeof(T), "eastl::atomic : Pun and T must be the same size for type punning!"); + + /** + * aligned_storage ensures we can TypePun objects that aren't trivially default constructible + * but still trivially copyable. + */ + typename eastl::aligned_storage::type ret; + memcpy(eastl::addressof(ret), eastl::addressof(fromType), sizeof(Pun)); + return reinterpret_cast(ret); +} + +template , int> = 0> +EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT +{ + return fromType; +} + + +template +EASTL_FORCE_INLINE T AtomicNegateOperand(T val) EA_NOEXCEPT +{ + static_assert(eastl::is_integral::value, "eastl::atomic : Integral Negation must be an Integral type!"); + static_assert(!eastl::is_volatile::value, "eastl::atomic : T must not be volatile!"); + + return static_cast(0U - static_cast>(val)); +} + +EASTL_FORCE_INLINE ptrdiff_t AtomicNegateOperand(ptrdiff_t val) EA_NOEXCEPT +{ + return -val; +} + + +} // namespace internal + + +} // namespace eastl + + +/** + * NOTE: + * + * These macros are meant to prevent inclusion hell. + * Also so that it fits with the style of the rest of the atomic macro implementation. + */ +#define EASTL_ATOMIC_VOLATILE_CAST(ptr) \ + eastl::internal::AtomicVolatileCast((ptr)) + +#define EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(IntegralType, ptr) \ + eastl::internal::AtomicVolatileIntegralCast((ptr)) + +#define EASTL_ATOMIC_INTEGRAL_CAST(IntegralType, ptr) \ + eastl::internal::AtomicIntegralCast((ptr)) + +#define EASTL_ATOMIC_VOLATILE_TYPE_CAST(ToType, ptr) \ + eastl::internal::AtomicVolatileTypeCast((ptr)) + +#define EASTL_ATOMIC_TYPE_CAST(ToType, ptr) \ + eastl::internal::AtomicTypeCast((ptr)) + +#define EASTL_ATOMIC_TYPE_PUN_CAST(PunType, fromType) \ + eastl::internal::AtomicTypePunCast((fromType)) + +#define EASTL_ATOMIC_NEGATE_OPERAND(val) \ + eastl::internal::AtomicNegateOperand((val)) + + +#endif /* EASTL_ATOMIC_INTERNAL_CASTS_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_flag.h b/external/EASTL/include/EASTL/internal/atomic/atomic_flag.h new file mode 100644 index 00000000..6be2069f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_flag.h @@ -0,0 +1,178 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H +#define EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +// 'class' : multiple assignment operators specified +EA_DISABLE_VC_WARNING(4522); + +// misaligned atomic operation may incur significant performance penalty +// The above warning is emitted in earlier versions of clang incorrectly. +// All eastl::atomic objects are size aligned. +// This is static and runtime asserted. +// Thus we disable this warning. +EA_DISABLE_CLANG_WARNING(-Watomic-alignment); + + +class atomic_flag +{ +public: /* ctors */ + + EA_CONSTEXPR atomic_flag(bool desired) EA_NOEXCEPT + : mFlag{ desired } + { + } + + EA_CONSTEXPR atomic_flag() EA_NOEXCEPT + : mFlag{ false } + { + } + +public: /* deleted ctors && assignment operators */ + + atomic_flag(const atomic_flag&) EA_NOEXCEPT = delete; + + atomic_flag& operator=(const atomic_flag&) EA_NOEXCEPT = delete; + atomic_flag& operator=(const atomic_flag&) volatile EA_NOEXCEPT = delete; + +public: /* clear */ + + template + void clear(Order /*order*/) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + } + + template + void clear(Order /*order*/) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + } + + void clear(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_relaxed); + } + + void clear(eastl::internal::memory_order_release_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_release); + } + + void clear(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_seq_cst); + } + + void clear() EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_seq_cst); + } + +public: /* test_and_set */ + + template + bool test_and_set(Order /*order*/) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + return false; + } + + template + bool test_and_set(Order /*order*/) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + return false; + } + + bool test_and_set(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_relaxed); + } + + bool test_and_set(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_acquire); + } + + bool test_and_set(eastl::internal::memory_order_release_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_release); + } + + bool test_and_set(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_acq_rel); + } + + bool test_and_set(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_seq_cst); + } + + bool test_and_set() EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_seq_cst); + } + +public: /* test */ + + template + bool test(Order /*order*/) const volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + return false; + } + + template + bool test(Order /*order*/) const EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + return false; + } + + bool test(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_relaxed); + } + + bool test(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT + { + return 
mFlag.load(eastl::memory_order_acquire); + } + + bool test(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_seq_cst); + } + + bool test() const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_seq_cst); + } + +private: + + eastl::atomic mFlag; +}; + +EA_RESTORE_VC_WARNING(); + +EA_RESTORE_CLANG_WARNING(); + + +} // namespace eastl + +#endif /* EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h b/external/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h new file mode 100644 index 00000000..b5284bed --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_flag_standalone.h @@ -0,0 +1,69 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H +#define EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_test_and_set(eastl::atomic*) +// +EASTL_FORCE_INLINE bool atomic_flag_test_and_set(eastl::atomic_flag* atomicObj) EA_NOEXCEPT +{ + return atomicObj->test_and_set(); +} + +template +EASTL_FORCE_INLINE bool atomic_flag_test_and_set_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + return atomicObj->test_and_set(order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_clear(eastl::atomic*) +// +EASTL_FORCE_INLINE void atomic_flag_clear(eastl::atomic_flag* atomicObj) +{ + atomicObj->clear(); +} + +template +EASTL_FORCE_INLINE void atomic_flag_clear_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + atomicObj->clear(order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_test(eastl::atomic*) +// +EASTL_FORCE_INLINE bool atomic_flag_test(eastl::atomic_flag* atomicObj) +{ + return atomicObj->test(); +} + +template +EASTL_FORCE_INLINE bool atomic_flag_test_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + return atomicObj->test(order); +} + + +} // namespace eastl + + +#endif /* EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h b/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h new file mode 100644 index 00000000..8ec2ed74 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h @@ -0,0 +1,351 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H +#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +namespace internal +{ + + +// 'class' : multiple assignment operators specified +EA_DISABLE_VC_WARNING(4522); + +// misaligned atomic operation may incur significant performance penalty +// The above warning is emitted in earlier versions of clang incorrectly. +// All eastl::atomic objects are size aligned. +// This is static and runtime asserted. +// Thus we disable this warning. 
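The atomic_flag interface above (test_and_set, test, clear, plus the standalone atomic_flag_* wrappers) is the classic building block for a test-and-set spinlock. A minimal sketch using only members shown in this header:

```cpp
#include <EASTL/atomic.h>

class SpinLock
{
public:
	void lock()
	{
		// Acquire on success so the critical section cannot float above the lock.
		while (mFlag.test_and_set(eastl::memory_order_acquire))
		{
			// Spin on test() (a plain load) instead of hammering the cache
			// line with locked read-modify-write operations.
			while (mFlag.test(eastl::memory_order_relaxed)) {}
		}
	}

	void unlock()
	{
		// Release so writes in the critical section are visible before the flag clears.
		mFlag.clear(eastl::memory_order_release);
	}

private:
	eastl::atomic_flag mFlag; // default-constructed to false
};
```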
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h b/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h
new file mode 100644
index 00000000..8ec2ed74
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_integral.h
@@ -0,0 +1,351 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// The above warning is emitted in earlier versions of clang incorrectly.
+// All eastl::atomic<T> objects are size aligned.
+// This is static and runtime asserted.
+// Thus we disable this warning.
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment);
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+    template <typename Order> \
+    T funcName(T /*arg*/, Order /*order*/) EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+    } \
+    \
+    template <typename Order> \
+    T funcName(T /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+    } \
+    \
+    T funcName(T /*arg*/) volatile EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+    }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+    T operator operatorOp() volatile EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+    } \
+    \
+    T operator operatorOp(int) volatile EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+    }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+    T operator operatorOp(T /*arg*/) volatile EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+    }
+
+
+    template <typename T, unsigned width = sizeof(T)>
+    struct atomic_integral_base : public atomic_base_width<T, width>
+    {
+    private:
+
+        using Base = atomic_base_width<T, width>;
+
+    public: /* ctors */
+
+        EA_CONSTEXPR atomic_integral_base(T desired) EA_NOEXCEPT
+            : Base{ desired }
+        {
+        }
+
+        EA_CONSTEXPR atomic_integral_base() EA_NOEXCEPT = default;
+
+        atomic_integral_base(const atomic_integral_base&) EA_NOEXCEPT = delete;
+
+    public: /* assignment operator */
+
+        using Base::operator=;
+
+        atomic_integral_base& operator=(const atomic_integral_base&)          EA_NOEXCEPT = delete;
+        atomic_integral_base& operator=(const atomic_integral_base&) volatile EA_NOEXCEPT = delete;
+
+    public: /* fetch_add */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+    public: /* add_fetch */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+    public: /* fetch_sub */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+    public: /* sub_fetch */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+    public: /* fetch_and */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_and)
+
+    public: /* and_fetch */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(and_fetch)
+
+    public: /* fetch_or */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_or)
+
+    public: /* or_fetch */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(or_fetch)
+
+    public: /* fetch_xor */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_xor)
+
+    public: /* xor_fetch */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(xor_fetch)
+
+    public: /* operator++ && operator-- */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+    public: /* operator+= && operator-= */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+    public: /* operator&= */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(&=)
+
+    public: /* operator|= */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(|=)
+
+    public: /* operator^= */
+
+        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(^=)
+
+    };
+
+
+    template <typename T, unsigned width = sizeof(T)>
+    struct atomic_integral_width;
+
+#define EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits) \
+    EASTL_ATOMIC_DEFAULT_INIT(T, retVal); \
+    EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), arg); \
+    return retVal;
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, op, bits) \
+    T funcName(T arg) EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+    }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+    T funcName(T arg, orderType) EA_NOEXCEPT \
+    { \
+        EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+    }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, Order) \
+    EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+    using Base::funcName; \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+                                           EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+                                           EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+                                           EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+                                           EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+    \
+    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+                                           EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+    using Base::operator operatorOp; \
+    \
+    T operator operatorOp() EA_NOEXCEPT \
+    { \
+        return preFuncName(1, eastl::memory_order_seq_cst); \
+    } \
+    \
+    T operator operatorOp(int) EA_NOEXCEPT \
+    { \
+        return postFuncName(1, eastl::memory_order_seq_cst); \
+    }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+    using Base::operator operatorOp; \
+    \
+    T operator operatorOp(T arg) EA_NOEXCEPT \
+    { \
+        return funcName(arg, eastl::memory_order_seq_cst); \
+    }
+
+
+#define EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(bytes, bits) \
+    template <typename T> \
+    struct atomic_integral_width<T, bytes> : public atomic_integral_base<T, bytes> \
+    { \
+    private: \
+        \
+        using Base = atomic_integral_base<T, bytes>; \
+        \
+    public: /* ctors */ \
+        \
+        EA_CONSTEXPR atomic_integral_width(T desired) EA_NOEXCEPT \
+            : Base{ desired } \
+        { \
+        } \
+        \
+        EA_CONSTEXPR atomic_integral_width() EA_NOEXCEPT = default; \
+        \
+        atomic_integral_width(const atomic_integral_width&) EA_NOEXCEPT = delete; \
+        \
+    public: /* assignment operator */ \
+        \
+        using Base::operator=; \
+        \
+        atomic_integral_width& operator=(const atomic_integral_width&)          EA_NOEXCEPT = delete; \
+        atomic_integral_width& operator=(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \
+        \
+    public: /* fetch_add */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+        \
+    public: /* add_fetch */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+        \
+    public: /* fetch_sub */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+        \
+    public: /* sub_fetch */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+        \
+    public: /* fetch_and */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_and, FETCH_AND, bits) \
+        \
+    public: /* and_fetch */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(and_fetch, AND_FETCH, bits) \
+        \
+    public: /* fetch_or */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_or, FETCH_OR, bits) \
+        \
+    public: /* or_fetch */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(or_fetch, OR_FETCH, bits) \
+        \
+    public: /* fetch_xor */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_xor, FETCH_XOR, bits) \
+        \
+    public: /* xor_fetch */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(xor_fetch, XOR_FETCH, bits) \
+        \
+    public: /* operator++ && operator-- */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+        \
+    public: /* operator+= && operator-= */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+        \
+    public: /* operator&= */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(&=, and_fetch) \
+        \
+    public: /* operator|= */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(|=, or_fetch) \
+        \
+    public: /* operator^= */ \
+        \
+        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(^=, xor_fetch) \
+        \
+    };
+
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(1, 8)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_16BIT)
+    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(2, 16)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(16, 128)
+#endif
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+#endif /* EASTL_ATOMIC_INTERNAL_INTEGRAL_H */
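
atomic_integral_base thus pins every misuse down once (volatile-qualified calls and bogus order types hit the static asserts above), while the width specialization stamps out the real read-modify-write operations through the EASTL_ATOMIC_* macro layer. A short usage sketch, assuming the target defines EASTL_ATOMIC_HAS_32BIT so the 4-byte specialization exists:

    #include <EASTL/atomic.h>

    void integral_ops_sketch()
    {
        eastl::atomic<int> counter{ 0 };

        int oldVal = counter.fetch_add(1, eastl::memory_order_relaxed); // returns the value before the add
        int newVal = counter.add_fetch(1, eastl::memory_order_acq_rel); // EASTL extension: returns the value after
        counter |= 0x4; // operator|= forwards to or_fetch(0x4, seq_cst)
        ++counter;      // operator++ forwards to add_fetch(1, seq_cst)

        (void)oldVal; (void)newVal;
    }

The fetch_add/add_fetch pairing (and likewise for sub/and/or/xor) is why each operation appears twice above: the std-style form returns the old value, the *_fetch form returns the new one.
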
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros.h
new file mode 100644
index 00000000..756a4b4d
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros.h
@@ -0,0 +1,67 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// The implementation is split into a compiler folder and an architecture (arch)
+// folder for the following reasons.
+//
+// The compiler directory is meant to implement atomics using the compiler-provided
+// intrinsics. This also implies that usually the same compiler intrinsic implementation
+// can be used for any architecture the compiler supports. If a compiler provides intrinsics
+// to support barriers or atomic operations, then that implementation should be in the
+// compiler directory.
+//
+// The arch directory is meant to manually implement atomics for a specific architecture
+// such as POWER or x86. There may be some compiler-specific code in this directory because,
+// for example, GCC's inline assembly syntax differs from that of other compilers.
+//
+// The arch directory can also be used to implement some atomic operations ourselves
+// if we deem the compiler-provided implementation to be inefficient for the given
+// architecture, or if we need to do some things manually for a given compiler.
+//
+// The atomic_macros directory implements the macros that the rest of the atomic
+// library uses. These macros will expand to either the compiler- or arch-implemented
+// macro. The arch-implemented macro is given priority over the compiler-implemented
+// macro if both are implemented; otherwise whichever one is implemented is chosen, and
+// an error is emitted if neither is implemented.
+//
+// The implementation being all macros has a couple of nice side effects as well.
+//
+// 1. All the implementation ends up funneling into one low-level macro implementation,
+//    which makes it easy to verify correctness and to reduce copy-paste errors and
+//    differences between the various platform implementations.
+//
+// 2. It allows the implementation to be efficient on compilers that do not
+//    directly implement the C++ memory model in their intrinsics, such as MSVC.
+//
+// 3. It allows the implementation of atomics that may not be supported on the given platform,
+//    such as 128-bit atomics on 32-bit platforms, since the macros will only ever be expanded
+//    on platforms that support said features. This makes implementing said features pretty easy
+//    since we do not have to worry about complicated feature detection in the low-level implementations.
+//
+// The macro implementation may assume that all passed-in types are trivially constructible, thus it is
+// free to create local variables of the passed-in types as it pleases.
+// It may also assume that all passed-in types are trivially copyable as well.
+// It cannot assume any passed-in type is any given type; thus if a specific type is needed, it must do an
+// EASTL_ATOMIC_TYPE_PUN_CAST() to the required type.
+//
+
+
+#include "compiler/compiler.h"
+#include "arch/arch.h"
+
+#include "atomic_macros/atomic_macros.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_H */
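
To make the layering concrete, here is an illustrative pair of macro bodies for one operation; the names and bodies are hypothetical, not EASTL's actual implementation, but they show how both layers can satisfy the same (type, ret, ptr, val) contract:

    // Compiler layer: wrap a builtin the compiler lowers on every target it supports.
    #define MY_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
        (ret) = __atomic_fetch_add((ptr), (val), __ATOMIC_RELAXED)

    // Arch layer: hand-written x86 fulfilling the same contract. xadd swaps the
    // register with memory while adding, leaving the old value in the register,
    // which is exactly fetch_add semantics.
    #define MY_ARCH_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
        __asm__ __volatile__("lock; xaddl %0, %1"      \
                             : "=r"(ret), "+m"(*(ptr)) \
                             : "0"(val)                \
                             : "memory")

Because both variants expand at identical call sites, the dispatch machinery in atomic_macros_base.h further down can pick either one without the rest of the library noticing.
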
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
new file mode 100644
index 00000000..437b221e
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
@@ -0,0 +1,156 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+#include 
+
+#include "atomic_macros_base.h"
+
+#include "atomic_macros_fetch_add.h"
+#include "atomic_macros_fetch_sub.h"
+
+#include "atomic_macros_fetch_and.h"
+#include "atomic_macros_fetch_xor.h"
+#include "atomic_macros_fetch_or.h"
+
+#include "atomic_macros_add_fetch.h"
+#include "atomic_macros_sub_fetch.h"
+
+#include "atomic_macros_and_fetch.h"
+#include "atomic_macros_xor_fetch.h"
+#include "atomic_macros_or_fetch.h"
+
+#include "atomic_macros_exchange.h"
+
+#include "atomic_macros_cmpxchg_weak.h"
+#include "atomic_macros_cmpxchg_strong.h"
+
+#include "atomic_macros_load.h"
+#include "atomic_macros_store.h"
+
+#include "atomic_macros_compiler_barrier.h"
+
+#include "atomic_macros_cpu_pause.h"
+
+#include "atomic_macros_memory_barrier.h"
+
+#include "atomic_macros_signal_fence.h"
+
+#include "atomic_macros_thread_fence.h"
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_8BIT) || defined(EASTL_ARCH_ATOMIC_HAS_8BIT)
+
+    #define EASTL_ATOMIC_HAS_8BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_16BIT) || defined(EASTL_ARCH_ATOMIC_HAS_16BIT)
+
+    #define EASTL_ATOMIC_HAS_16BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_32BIT) || defined(EASTL_ARCH_ATOMIC_HAS_32BIT)
+
+    #define EASTL_ATOMIC_HAS_32BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_64BIT) || defined(EASTL_ARCH_ATOMIC_HAS_64BIT)
+
+    #define EASTL_ATOMIC_HAS_64BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_128BIT) || defined(EASTL_ARCH_ATOMIC_HAS_128BIT)
+
+    #define EASTL_ATOMIC_HAS_128BIT
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+    #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#endif
+
+// We write some of our variables in inline assembly, which MSAN
+// doesn't understand. 
This macro forces initialization of those +// variables when MSAN is enabled and doesn't pay the initialization +// cost when it's not enabled. +#if EA_MSAN_ENABLED + #define EASTL_ATOMIC_DEFAULT_INIT(type, var) type var{} +#else + #define EASTL_ATOMIC_DEFAULT_INIT(type, var) type var +#endif // EA_MSAN_ENABLED + + +#endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h new file mode 100644 index 00000000..f551a07c --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, 
ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h new file mode 100644 index 00000000..69127223 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
new file mode 100644
index 00000000..486e137a
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
@@ -0,0 +1,70 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+#define EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op) \
+    EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op), _AVAILABLE)
+
+#define EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op) \
+    EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op), _AVAILABLE)
+
+
+// We can't just use static_assert(false, ...) here, since on MSVC 17.10
+// the /Zc:static_assert flag makes non-dependent static_asserts in the body of a template
+// be evaluated at template-parse time, rather than at template instantiation time.
+// So instead we just make the assert dependent on the type.
+#define EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR(...) \
+    static_assert(!eastl::is_same_v<T, T>, "eastl::atomic<T> atomic macro not implemented!")
+
+
+/* Compiler && Arch Not Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_00(op) \
+    EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR
+
+/* Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_01(op) \
+    EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+/* Compiler Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_10(op) \
+    EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op)
+
+/* Compiler && Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_11(op) \
+    EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+
+/* This macro creates the pattern macros above for the 2x2 True-False truth table */
+#define EASTL_ATOMIC_INTERNAL_OP_HELPER1(compiler, arch, op) \
+    EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_INTERNAL_OP_PATTERN_, EA_PREPROCESSOR_JOIN(compiler, arch))(op)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL
+//
+// This macro chooses between the compiler or architecture implementation for a
+// given atomic operation.
+//
+// USAGE:
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val)
+//
+#define EASTL_ATOMIC_CHOOSE_OP_IMPL(op) \
+    EASTL_ATOMIC_INTERNAL_OP_HELPER1( \
+        EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op), \
+        EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op), \
+        op \
+    )
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_BASE_H */
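
The pattern selection is worth unpacking: each *_AVAILABLE macro expands to exactly 0 or 1, the two digits are token-pasted into one of the four OP_PATTERN macros, and that pattern finally names the implementation, with the arch flavor deliberately winning the 11 case. A self-contained toy with hypothetical names that reproduces the trick:

    // Two-level join so macro arguments expand before pasting.
    #define MY_JOIN2(a, b) a##b
    #define MY_JOIN(a, b)  MY_JOIN2(a, b)

    #define MY_COMPILER_OP_AVAILABLE 1   // this compiler implements the op
    #define MY_ARCH_OP_AVAILABLE     0   // no hand-written arch version

    #define MY_PATTERN_00(op) MY_JOIN(missing_, op)   // neither: names a nonexistent symbol
    #define MY_PATTERN_01(op) MY_JOIN(arch_, op)
    #define MY_PATTERN_10(op) MY_JOIN(compiler_, op)
    #define MY_PATTERN_11(op) MY_JOIN(arch_, op)      // arch overrides compiler

    #define MY_CHOOSE(op) \
        MY_JOIN(MY_PATTERN_, MY_JOIN(MY_COMPILER_OP_AVAILABLE, MY_ARCH_OP_AVAILABLE))(op)

    // MY_CHOOSE(FETCH_ADD) pastes 1 and 0 into MY_PATTERN_10, so it expands to
    // compiler_FETCH_ADD; flipping the availability flags reroutes every call
    // site without touching any of them.

Note that EASTL's 00 pattern expands to the NOT_IMPLEMENTED macro above, so a missing operation only fails, via the dependent static_assert, if something actually instantiates it.
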
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
new file mode 100644
index 00000000..3cff4935
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define 
EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void 
EASTL_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H */
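
Together the two-order (_*_*_N) table and the single-order fallback table back eastl::atomic<T>'s compare_exchange_strong and compare_exchange_weak overload sets. A hedged sketch of the canonical retry loop the weak flavor exists for, using only the public eastl::atomic API (update_max is an illustrative helper, not part of this change):

    #include <EASTL/atomic.h>

    // Lock-free "store the maximum of all candidates seen so far".
    void update_max(eastl::atomic<int>& maxValue, int candidate)
    {
        int observed = maxValue.load(eastl::memory_order_relaxed);
        while (candidate > observed &&
               !maxValue.compare_exchange_weak(observed, candidate,
                                               eastl::memory_order_release,   // order on success
                                               eastl::memory_order_relaxed))  // order on failure
        {
            // compare_exchange_weak may fail spuriously; on any failure it
            // reloads 'observed' with the current value and we re-test.
        }
    }

The strong form forbids spurious failure and suits one-shot attempts; in a loop like this the weak form is preferred, since the loop already tolerates retries.
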
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
new file mode 100644
index 00000000..60ea8b0b
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, 
desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, 
expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h new file mode 100644 index 00000000..96ea6d0b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h @@ -0,0 +1,30 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_COMPILER_BARRIER() +// +#define EASTL_ATOMIC_COMPILER_BARRIER() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#define EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)(val, type) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h new file mode 100644 index 00000000..e027b576 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h @@ -0,0 +1,22 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_PAUSE() +// +#define EASTL_ATOMIC_CPU_PAUSE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_PAUSE)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h new file mode 100644 index 00000000..0681318f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h new file mode 100644 index 00000000..701fdf37 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_32)(type, ret, ptr, 
val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h new file mode 100644 index 00000000..831f1bfe --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h new file mode 100644 index 00000000..b1322970 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h new file mode 100644 index 00000000..00980643 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h new file mode 100644 index 00000000..2887ea56 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h new file mode 100644 index 00000000..76580593 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h @@ -0,0 +1,75 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H +#define EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#define EASTL_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_8)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_8)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_8)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_16)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_16)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_16)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_32)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_32)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_32)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_64)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_64)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_64)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_128)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_128)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_128)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_32)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_64)(type, ret, ptr) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h new file mode 100644 index 00000000..14f7be92 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_MB() +// +#define EASTL_ATOMIC_CPU_MB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_MB)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_WMB() +// +#define EASTL_ATOMIC_CPU_WMB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_WMB)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_RMB() +// +#define EASTL_ATOMIC_CPU_RMB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_RMB)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h new file mode 100644 index 00000000..c9ebd6e3 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h new file mode 100644 index 00000000..dd16b106 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELAXED)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQUIRE)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELEASE)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQ_REL)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_SEQ_CST)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h new file mode 100644 index 00000000..64b662e1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h @@ -0,0 +1,68 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_STORE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#define EASTL_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_8)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_8)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_8)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_16)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_16)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_16)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_32)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_32)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_32)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_64)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_64)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_64)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_128)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_128)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_128)(type, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h new file mode 100644 index 00000000..330f38e9 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define 
EASTL_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h new file mode 100644 index 00000000..26492c59 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_THREAD_FENCE_*() +// +#define EASTL_ATOMIC_THREAD_FENCE_RELAXED() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELAXED)() + +#define EASTL_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQUIRE)() + +#define EASTL_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELEASE)() + +#define EASTL_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQ_REL)() + +#define EASTL_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_SEQ_CST)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h new file mode 100644 index 00000000..42276470 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+    EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h b/external/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h
new file mode 100644
index 00000000..1564d87d
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_memory_order.h
@@ -0,0 +1,44 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+#define EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+struct memory_order_relaxed_s {};
+struct memory_order_read_depends_s {};
+struct memory_order_acquire_s {};
+struct memory_order_release_s {};
+struct memory_order_acq_rel_s {};
+struct memory_order_seq_cst_s {};
+
+
+} // namespace internal
+
+
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_relaxed = internal::memory_order_relaxed_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_read_depends = internal::memory_order_read_depends_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acquire = internal::memory_order_acquire_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_release = internal::memory_order_release_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acq_rel = internal::memory_order_acq_rel_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_seq_cst = internal::memory_order_seq_cst_s{};
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_pointer.h b/external/EASTL/include/EASTL/internal/atomic/atomic_pointer.h
new file mode 100644
index 00000000..279fa1ba
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_pointer.h
@@ -0,0 +1,289 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_POINTER_H
+#define EASTL_ATOMIC_INTERNAL_POINTER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// The above warning is emitted in earlier versions of clang incorrectly.
+// All eastl::atomic<T> objects are size aligned.
+// This is static and runtime asserted.
+// Thus we disable this warning.
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment);
+
+
+	template <typename T, unsigned width = sizeof(T)>
+	struct atomic_pointer_base;
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+	template <typename Order> \
+	T* funcName(ptrdiff_t /*arg*/, Order /*order*/) EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+	} \
+	\
+	template <typename Order> \
+	T* funcName(ptrdiff_t /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+	} \
+	\
+	T* funcName(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+	}
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+	T* operator operatorOp() volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+	} \
+	\
+	T* operator operatorOp(int) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+	}
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+	T* operator operatorOp(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+	}
+
+
+	template <typename T, unsigned width>
+	struct atomic_pointer_base<T*, width> : public atomic_base_width<T*, width>
+	{
+	private:
+
+		using Base = atomic_base_width<T*, width>;
+
+	public: /* ctors */
+
+		EA_CONSTEXPR atomic_pointer_base(T* desired) EA_NOEXCEPT
+			: Base{ desired }
+		{
+		}
+
+		EA_CONSTEXPR atomic_pointer_base() EA_NOEXCEPT = default;
+
+		atomic_pointer_base(const atomic_pointer_base&) EA_NOEXCEPT = delete;
+
+	public: /* assignment operators */
+
+		using Base::operator=;
+
+		atomic_pointer_base& operator=(const atomic_pointer_base&)          EA_NOEXCEPT = delete;
+		atomic_pointer_base& operator=(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete;
+
+	public: /* fetch_add */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+	public: /* add_fetch */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+	public: /* fetch_sub */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+	public: /* sub_fetch */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+	public: /* operator++ && operator-- */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+	public: /* operator+= && operator-= */
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+		EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+	};
+
+
+	template <typename T, unsigned width = sizeof(T)>
+	struct atomic_pointer_width;
+
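+// Editorial usage sketch (not upstream text): the macros below implement scaled
+// pointer arithmetic, computing the byte addend as arg * sizeof(T), exactly like
+// built-in pointer arithmetic. For example:
+//
+//     int buf[4] = {};
+//     eastl::atomic<int*> p{ buf };
+//     p.fetch_add(1); // p.load() == buf + 1, i.e. advanced by sizeof(int) bytes
+//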
+#define EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits) \
+	T* retVal; \
+	{ \
+		ptr_integral_type retType; \
+		ptr_integral_type addend = static_cast<ptr_integral_type>(arg) * static_cast<ptr_integral_type>(sizeof(T)); \
+		\
+		EA_PREPROCESSOR_JOIN(op, bits)(ptr_integral_type, retType, EASTL_ATOMIC_INTEGRAL_CAST(ptr_integral_type, this->GetAtomicAddress()), addend); \
+		\
+		retVal = reinterpret_cast<T*>(retType); \
+	} \
+	return retVal;
+
+#define EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, op, bits) \
+	T* funcName(ptrdiff_t arg) EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+		EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+	}
+
+#define EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+	T* funcName(ptrdiff_t arg, orderType) EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+		EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+	}
+
+#define EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, Order) \
+	EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+	using Base::funcName; \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+										  EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+										  EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+										  EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+										  EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+	\
+	EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+										  EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+	using Base::operator operatorOp; \
+	\
+	T* operator operatorOp() EA_NOEXCEPT \
+	{ \
+		return preFuncName(1, eastl::memory_order_seq_cst); \
+	} \
+	\
+	T* operator operatorOp(int) EA_NOEXCEPT \
+	{ \
+		return postFuncName(1, eastl::memory_order_seq_cst); \
+	}
+
+#define EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+	using Base::operator operatorOp; \
+	\
+	T* operator operatorOp(ptrdiff_t arg) EA_NOEXCEPT \
+	{ \
+		return funcName(arg, eastl::memory_order_seq_cst); \
+	}
+
+
+#define EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(bytes, bits) \
+	template <typename T> \
+	struct atomic_pointer_width<T*, bytes> : public atomic_pointer_base<T*, bytes> \
+	{ \
+	private: \
+	\
+		using Base = atomic_pointer_base<T*, bytes>; \
+		using u_ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(uint, bits), _t); \
+		using ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(int, bits), _t); \
+	\
+	public: /* ctors */ \
+	\
+		EA_CONSTEXPR atomic_pointer_width(T* desired) EA_NOEXCEPT \
+			: Base{ desired } \
+		{ \
+		} \
+	\
+		EA_CONSTEXPR atomic_pointer_width() EA_NOEXCEPT = default; \
+	\
+		atomic_pointer_width(const atomic_pointer_width&) EA_NOEXCEPT = delete; \
+	\
+	public: /* assignment operators */ \
+	\
+		using Base::operator=; \
+	\
+		atomic_pointer_width& operator=(const atomic_pointer_width&)          EA_NOEXCEPT = delete; \
+		atomic_pointer_width& operator=(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \
+	\
+	public: /* fetch_add */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+	\
+	public: /* add_fetch */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+	\
+	public: /* fetch_sub */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+	\
+	public: /* sub_fetch */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+	\
+	public: /* operator++ && operator-- */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+	\
+	public: /* operator+= && operator-= */ \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+	\
+		EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+	\
+	public: \
+	\
+		using Base::load; \
+	\
+		T* load(eastl::internal::memory_order_read_depends_s) EA_NOEXCEPT \
+		{ \
+			T* retPointer; \
+			EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_LOAD_READ_DEPENDS_, bits)(T*, retPointer, this->GetAtomicAddress()); \
+			return retPointer; \
+		} \
+	};
+
+
+#if defined(EASTL_ATOMIC_HAS_32BIT) && EA_PLATFORM_PTR_SIZE == 4
+	EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT) && EA_PLATFORM_PTR_SIZE == 8
+	EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+#endif /* EASTL_ATOMIC_INTERNAL_POINTER_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h b/external/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h
new file mode 100644
index 00000000..c272335d
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_size_aligned.h
@@ -0,0 +1,205 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+#define EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// The above warning is emitted in earlier versions of clang incorrectly.
+// All eastl::atomic objects are size aligned.
+// This is static and runtime asserted.
+// Thus we disable this warning.
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment);
+
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(funcName) \
+	template <typename OrderSuccess, typename OrderFailure> \
+	bool funcName(T& /*expected*/, T /*desired*/, \
+				  OrderSuccess /*orderSuccess*/, \
+				  OrderFailure /*orderFailure*/) EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+		return false; \
+	} \
+	\
+	template <typename OrderSuccess, typename OrderFailure> \
+	bool funcName(T& /*expected*/, T /*desired*/, \
+				  OrderSuccess /*orderSuccess*/, \
+				  OrderFailure /*orderFailure*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+		return false; \
+	} \
+	\
+	template <typename Order> \
+	bool funcName(T& /*expected*/, T /*desired*/, \
+				  Order /*order*/) EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+		return false; \
+	} \
+	\
+	template <typename Order> \
+	bool funcName(T& /*expected*/, T /*desired*/, \
+				  Order /*order*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+		return false; \
+	} \
+	\
+	bool funcName(T& /*expected*/, T /*desired*/) volatile EA_NOEXCEPT \
+	{ \
+		EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+		return false; \
+	}
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL() \
+	EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_weak)
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL() \
+	EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_strong)
+
+
+	template <typename T>
+	struct atomic_size_aligned
+	{
+	public: /* ctors */
+
+		EA_CONSTEXPR atomic_size_aligned(T desired) EA_NOEXCEPT
+			: mAtomic{ desired }
+		{
+		}
+
+		EA_CONSTEXPR atomic_size_aligned() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<T>)
+			: mAtomic{} /* Value-Initialize which will Zero-Initialize Trivial Constructible types */
+		{
+		}
+
+		atomic_size_aligned(const atomic_size_aligned&) EA_NOEXCEPT = delete;
+
+	public: /* store */
+
+		template <typename Order>
+		void store(T /*desired*/, Order /*order*/) EA_NOEXCEPT
+		{
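+			// Descriptive note: this catch-all template is chosen for any Order
+			// that is not one of the eastl::internal memory-order tag types, so
+			// an invalid memory order is rejected at compile time: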
+			EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+		}
+
+		template <typename Order>
+		void store(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+		void store(T /*desired*/) volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+	public: /* load */
+
+		template <typename Order>
+		T load(Order /*order*/) const EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+		}
+
+		template <typename Order>
+		T load(Order /*order*/) const volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+		T load() const volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+	public: /* exchange */
+
+		template <typename Order>
+		T exchange(T /*desired*/, Order /*order*/) EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+		}
+
+		template <typename Order>
+		T exchange(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+		T exchange(T /*desired*/) volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+	public: /* compare_exchange_weak */
+
+		EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL()
+
+	public: /* compare_exchange_strong */
+
+		EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL()
+
+	public: /* assignment operator */
+
+		T operator=(T /*desired*/) volatile EA_NOEXCEPT
+		{
+			EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+		}
+
+		atomic_size_aligned& operator=(const atomic_size_aligned&)          EA_NOEXCEPT = delete;
+		atomic_size_aligned& operator=(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete;
+
+	protected: /* Accessors */
+
+		T* GetAtomicAddress() const EA_NOEXCEPT
+		{
+			return eastl::addressof(mAtomic);
+		}
+
+	private:
+
+		/**
+		 * Some compilers such as MSVC will align 64-bit values on 32-bit machines on
+		 * 4-byte boundaries, which can ruin the atomicity guarantees.
+		 *
+		 * Ensure everything is size aligned.
+		 *
+		 * mutable is needed because some loads, such as 128-bit loads, are only
+		 * guaranteed to be atomic when implemented with a compare exchange, so even
+		 * a const load path needs write access to the variable.
+		 */
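+		// Editorial sketch (not shipped code): the explicit alignment below makes
+		//     static_assert(alignof(eastl::atomic<uint64_t>) >= 8, "size aligned");
+		// hold even on 32-bit targets where the default could be 4-byte alignment.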
+		EA_ALIGN(sizeof(T)) mutable T mAtomic;
+	};
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+#endif /* EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/atomic_standalone.h b/external/EASTL/include/EASTL/internal/atomic/atomic_standalone.h
new file mode 100644
index 00000000..011d5fb3
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/atomic_standalone.h
@@ -0,0 +1,470 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STANDALONE_H
+#define EASTL_ATOMIC_INTERNAL_STANDALONE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_strong(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong(eastl::atomic<T>* atomicObj,
+													   typename eastl::atomic<T>::value_type* expected,
+													   typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+	return atomicObj->compare_exchange_strong(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong_explicit(eastl::atomic<T>* atomicObj,
+																typename eastl::atomic<T>::value_type* expected,
+																typename eastl::atomic<T>::value_type desired,
+																OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+	return atomicObj->compare_exchange_strong(*expected, desired, orderSuccess, orderFailure);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_weak(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak(eastl::atomic<T>* atomicObj,
+													 typename eastl::atomic<T>::value_type* expected,
+													 typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+	return atomicObj->compare_exchange_weak(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak_explicit(eastl::atomic<T>* atomicObj,
+															  typename eastl::atomic<T>::value_type* expected,
+															  typename eastl::atomic<T>::value_type desired,
+															  OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+	return atomicObj->compare_exchange_weak(*expected, desired, orderSuccess, orderFailure);
+}
+
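+// Editorial usage sketch (not upstream text): the standalone functions mirror
+// the member API; on failure, `expected` is updated with the observed value.
+//
+//     eastl::atomic<int> a{ 0 };
+//     int expected = 0;
+//     bool ok = eastl::atomic_compare_exchange_strong(&a, &expected, 1);
+//     // ok == true and a.load() == 1
+//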
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_xor(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->fetch_xor(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::value_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->fetch_xor(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_xor_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->xor_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::value_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->xor_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_or(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or(eastl::atomic<T>* atomicObj,
+																		 typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->fetch_or(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or_explicit(eastl::atomic<T>* atomicObj,
+																				  typename eastl::atomic<T>::value_type arg,
+																				  Order order) EA_NOEXCEPT
+{
+	return atomicObj->fetch_or(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_or_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch(eastl::atomic<T>* atomicObj,
+																		 typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->or_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch_explicit(eastl::atomic<T>* atomicObj,
+																				  typename eastl::atomic<T>::value_type arg,
+																				  Order order) EA_NOEXCEPT
+{
+	return atomicObj->or_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_and(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->fetch_and(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::value_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->fetch_and(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_and_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+	return atomicObj->and_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::value_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->and_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_sub(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+	return atomicObj->fetch_sub(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::difference_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->fetch_sub(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_sub_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+	return atomicObj->sub_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::difference_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->sub_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_add(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+	return atomicObj->fetch_add(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::difference_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->fetch_add(arg, order);
+}
+
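+// Editorial note: add_fetch below, like xor_fetch, or_fetch, and_fetch, and
+// sub_fetch above, is an EASTL extension that returns the value *after* the
+// operation; the std-style fetch_* functions return the value *before* it.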
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_add_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch(eastl::atomic<T>* atomicObj,
+																		  typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+	return atomicObj->add_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch_explicit(eastl::atomic<T>* atomicObj,
+																				   typename eastl::atomic<T>::difference_type arg,
+																				   Order order) EA_NOEXCEPT
+{
+	return atomicObj->add_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_exchange(eastl::atomic<T>*, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange(eastl::atomic<T>* atomicObj,
+																		 typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+	return atomicObj->exchange(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange_explicit(eastl::atomic<T>* atomicObj,
+																				  typename eastl::atomic<T>::value_type desired,
+																				  Order order) EA_NOEXCEPT
+{
+	return atomicObj->exchange(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load(const eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+	return atomicObj->load();
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_explicit(const eastl::atomic<T>* atomicObj, Order order) EA_NOEXCEPT
+{
+	return atomicObj->load(order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load_cond(const eastl::atomic<T>*)
+//
+template <typename T, typename Predicate>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond(const eastl::atomic<T>* atomicObj, Predicate pred) EA_NOEXCEPT
+{
+	for (;;)
+	{
+		typename eastl::atomic<T>::value_type ret = atomicObj->load();
+
+		if (pred(ret))
+		{
+			return ret;
+		}
+
+		EASTL_ATOMIC_CPU_PAUSE();
+	}
+}
+
+template <typename T, typename Predicate, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond_explicit(const eastl::atomic<T>* atomicObj, Predicate pred, Order order) EA_NOEXCEPT
+{
+	for (;;)
+	{
+		typename eastl::atomic<T>::value_type ret = atomicObj->load(order);
+
+		if (pred(ret))
+		{
+			return ret;
+		}
+
+		EASTL_ATOMIC_CPU_PAUSE();
+	}
+}
+
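+// Editorial usage sketch (not upstream text): atomic_load_cond spins, pausing
+// the CPU between iterations, until the predicate accepts the loaded value.
+//
+//     eastl::atomic<int> flag{ 0 };
+//     // another thread eventually does: flag.store(1);
+//     int seen = eastl::atomic_load_cond(&flag, [](int v) { return v != 0; });
+//     // here seen != 0
+//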
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void atomic_store(eastl::atomic<T>*, T)
+//
+template <typename T>
+EASTL_FORCE_INLINE void atomic_store(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+	atomicObj->store(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE void atomic_store_explicit(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired, Order order) EA_NOEXCEPT
+{
+	atomicObj->store(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_thread_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_thread_fence(Order) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_THREAD_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_THREAD_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_THREAD_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_THREAD_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_THREAD_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_signal_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_signal_fence(Order) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_SIGNAL_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_SIGNAL_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier()
+//
+EASTL_FORCE_INLINE void compiler_barrier() EA_NOEXCEPT
+{
+	EASTL_ATOMIC_COMPILER_BARRIER();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier_data_dependency(const T&)
+//
+template <typename T>
+EASTL_FORCE_INLINE void compiler_barrier_data_dependency(const T& val) EA_NOEXCEPT
+{
+	EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, T);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::cpu_pause()
+//
+EASTL_FORCE_INLINE void cpu_pause() EA_NOEXCEPT
+{
+	EASTL_ATOMIC_CPU_PAUSE();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// bool eastl::atomic_is_lock_free(eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_is_lock_free(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+	return atomicObj->is_lock_free();
+}
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STANDALONE_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler.h
new file mode 100644
index 00000000..fc128795
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler.h
@@ -0,0 +1,120 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Include the compiler specific implementations
+//
+#if defined(EA_COMPILER_GNUC) || defined(__clang__)
+
+	#include "gcc/compiler_gcc.h"
+
+#elif defined(EA_COMPILER_MSVC)
+
+	#include "msvc/compiler_msvc.h"
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+/**
+ * NOTE:
+ *
+ * This can be used by specific compiler implementations to implement a data dependency compiler barrier.
+ * Some compiler barriers do not take in input dependencies as is possible with the gcc asm syntax.
+ * Thus we need a way to create a false dependency on the input variable so the compiler does not dead-store
+ * remove it.
+ * A volatile function pointer ensures the compiler must always load the function pointer and call through it
+ * since the compiler cannot reason about any side effects. Thus the compiler must always assume the
+ * input variable may be accessed and thus cannot be dead-stored. This technique works even in the presence
+ * of Link-Time Optimization. A compiler barrier with a data dependency is useful in these situations.
+ *
+ * void foo()
+ * {
+ *     eastl::vector<int> v;
+ *     while (Benchmark.ContinueRunning())
+ *     {
+ *         v.push_back(0);
+ *         eastl::compiler_barrier(); OR eastl::compiler_barrier_data_dependency(v);
+ *     }
+ * }
+ *
+ * We are trying to benchmark the push_back function of a vector. The vector v has only local scope.
+ * The compiler is well within its rights to remove all accesses to v even with the compiler barrier
+ * because there are no observable uses of the vector v.
+ * The compiler barrier data dependency ensures there is an input dependency on the variable so that
+ * it isn't removed. This is also useful when writing test code that the compiler may remove.
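+ *
+ * Mechanically (editorial note), the EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC
+ * macro below expands to a call through the volatile function pointer:
+ *
+ *     eastl::internal::gCompilerBarrierDataDependencyFunc(&v);
+ *
+ * which the compiler cannot prove free of reads or writes to v, so v stays live.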
+ */ + +typedef void (*CompilerBarrierDataDependencyFuncPtr)(void*); + +extern EASTL_API volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierDataDependencyFunc; + + +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(ptr) \ + eastl::internal::gCompilerBarrierDataDependencyFunc(ptr) + + +} // namespace internal + + +} // namespace eastl + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "compiler_fetch_add.h" +#include "compiler_fetch_sub.h" + +#include "compiler_fetch_and.h" +#include "compiler_fetch_xor.h" +#include "compiler_fetch_or.h" + +#include "compiler_add_fetch.h" +#include "compiler_sub_fetch.h" + +#include "compiler_and_fetch.h" +#include "compiler_xor_fetch.h" +#include "compiler_or_fetch.h" + +#include "compiler_exchange.h" + +#include "compiler_cmpxchg_weak.h" +#include "compiler_cmpxchg_strong.h" + +#include "compiler_load.h" +#include "compiler_store.h" + +#include "compiler_barrier.h" + +#include "compiler_cpu_pause.h" + +#include "compiler_memory_barrier.h" + +#include "compiler_signal_fence.h" + +#include "compiler_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h new file mode 100644 index 00000000..763921c4 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128) + #define 
EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h new file mode 100644 index 00000000..7b1e0a42 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h new file mode 100644 index 00000000..550070e3 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_barrier.h @@ -0,0 +1,36 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() +// +#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER) + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY) + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h new file mode 100644 index 00000000..2ee29711 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8) + #define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0 
+#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64) + #define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \ + 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, 
ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h new file mode 100644 index 00000000..9bc1a621 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + 
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \ + 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \ + 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h new file mode 100644 index 00000000..073b3fbb --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h @@ -0,0 +1,32 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_PAUSE() +// +#if defined(EASTL_COMPILER_ATOMIC_CPU_PAUSE) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1 + +#else + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \ + ((void)0) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1 + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h new file mode 100644 index 00000000..d82b199d --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_exchange.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1 +#else + #define 
EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h new file mode 100644 index 00000000..e6c4238f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h @@ -0,0 +1,173 @@ 
+///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1 +#else + 
#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h new file mode 100644 index 00000000..b0976fc7 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h new file mode 100644 index 00000000..2e6cfdac --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 
1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h new file mode 100644 index 00000000..d7ed86cc --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h new file mode 100644 index 00000000..10cf7d90 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h new file mode 100644 index 00000000..734dbb80 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_load.h @@ -0,0 +1,139 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0 +#endif + + +/** + * NOTE: + * + * These are used for data-dependent reads thru a pointer. It is safe + * to assume that pointer-sized reads are atomic on any given platform. + * This implementation assumes the hardware doesn't reorder dependent + * loads unlike the DEC Alpha. 
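+ *
+ * A minimal usage sketch of the macro defined below (Node and gPublished are
+ * hypothetical names, assuming a 64-bit target; illustrative only):
+ *
+ *     struct Node { int value; };
+ *     Node* gPublished; // written by a producer thread with release semantics
+ *
+ *     Node* local;
+ *     EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(Node*, local, &gPublished);
+ *     int v = local->value; // data-dependent read; no extra fence needed here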
+ */
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) \
+	{ \
+		static_assert(eastl::is_pointer_v<type>, "eastl::atomic<T> : Read Depends Type must be a Pointer Type!"); \
+		static_assert(eastl::is_pointer_v<eastl::remove_pointer_t<decltype(ptr)>>, "eastl::atomic<T> : Read Depends Ptr must be a Pointer to a Pointer!"); \
+		\
+		ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)); \
+	}
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \
+	EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
+	EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
new file mode 100644
index 00000000..ac3923c6
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
@@ -0,0 +1,47 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_MB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_MB)
+	#define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_WMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_WMB)
+	#define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_RMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_RMB)
+	#define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
new file mode 100644
index 00000000..a26a72c7
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 
1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h new file mode 100644 index 00000000..25b0b741 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h new file mode 100644 index 00000000..1a553e2a --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_store.h @@ -0,0 +1,113 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h new file mode 100644 index 
00000000..4b7eea92 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + 
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h new file mode 100644 index 00000000..01d8f0f9 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*() +// +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h new file mode 100644 index 00000000..05680bd1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
new file mode 100644
index 00000000..26a99c20
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
@@ -0,0 +1,154 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * gcc __atomic builtins may defer to function calls in libatomic.so for architectures that do not
+ * support atomic instructions of a given size. Those functions are implemented with pthread_mutex_t,
+ * and using them also requires explicitly linking against the compiler runtime library, libatomic.so.
+ * On architectures that do not support atomics, such as armv6, the builtins may defer to kernel helpers,
+ * or, on classic uniprocessor systems, simply disable interrupts.
+ *
+ * We do not want to have to link against libatomic.so or fall into the trap of our atomics degrading
+ * into locks. We would rather have user code explicitly use locking primitives if their code cannot
+ * be satisfied with atomic instructions on the given platform.
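+ *
+ * For a concrete picture of the trap (hypothetical 16-byte payload; the snippet
+ * is illustrative only, not code from this library):
+ *
+ *     struct Pair { void* a; void* b; }; // 16 bytes on a 64-bit target
+ *     Pair src, dst;
+ *     __atomic_load(&src, &dst, __ATOMIC_SEQ_CST);
+ *
+ * gcc lowers this to a call to __atomic_load_16 in libatomic.so, so the program
+ * fails to link unless -latomic is added, and the "atomic" may be a mutex inside.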
+ */
+static_assert(__atomic_always_lock_free(1, 0), "eastl::atomic<T> where sizeof(T) == 1 must be lock-free!");
+static_assert(__atomic_always_lock_free(2, 0), "eastl::atomic<T> where sizeof(T) == 2 must be lock-free!");
+static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic<T> where sizeof(T) == 4 must be lock-free!");
+#if EA_PLATFORM_PTR_SIZE == 8
+	static_assert(__atomic_always_lock_free(8, 0), "eastl::atomic<T> where sizeof(T) == 8 must be lock-free!");
+#endif
+
+/**
+ * NOTE:
+ *
+ * The following can fail on gcc/clang on 64-bit systems.
+ * First, on clang it depends on the -march setting whether or not it calls out to libatomic for 128-bit operations.
+ * Second, gcc always calls out to libatomic for 128-bit atomics. It is unclear whether it uses locks
+ * or looks at the cpuid and uses cmpxchg16b if it's available.
+ * gcc mailing lists argue that since a load must be implemented with cmpxchg16b, the __atomic builtin
+ * cannot be used on read-only memory, which is why they always call out to libatomic.
+ * There is, unfortunately, no way to tell gcc not to do that.
+ * We don't care about the read-only restriction because our eastl::atomic<T> object is mutable,
+ * and msvc doesn't enforce this restriction either, so to be fully platform-agnostic we cannot enforce it.
+ *
+ * Therefore, the following static_assert is commented out for the time being, as it always fails on these compilers.
+ * We still guarantee 128-bit atomics are lock-free by hand-rolling the inline assembly ourselves.
+ *
+ * static_assert(__atomic_always_lock_free(16, 0), "eastl::atomic<T> where sizeof(T) == 16 must be lock-free!");
+ */
+
+/**
+ * NOTE:
+ *
+ * Why do we cast to the unsigned fixed-width types for every operation even though the gcc/clang builtins are generic?
+ * Because gcc/clang, correctly by their own rules but needlessly for us, call out to libatomic and do locking
+ * on user types that may potentially be misaligned.
+ * struct UserType { uint8_t a, b; }; This struct is 2 bytes in size but has only 1-byte alignment.
+ * gcc/clang cannot know that we always guarantee every type T is size-aligned within eastl::atomic<T>,
+ * so they always emit calls into libatomic and do locking for structs like these, which we do not want.
+ * That is why we always cast each atomic ptr type to the equivalent unsigned fixed-width type when doing the atomic operations.
+ * This ensures all user types are size-aligned and thus are lock-free.
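+ *
+ * A sketch of what the cast buys us (illustrative only; UserType, Load, and the
+ * calls below are examples, not the verbatim macro expansion):
+ *
+ *     UserType Load(UserType* ptr)
+ *     {
+ *         // The 2-byte struct is reinterpreted as uint16_t, so the builtin sees
+ *         // a naturally aligned integral and stays lock-free.
+ *         uint16_t tmp = __atomic_load_n(reinterpret_cast<uint16_t*>(ptr), __ATOMIC_SEQ_CST);
+ *         UserType ret;
+ *         memcpy(&ret, &tmp, sizeof(ret)); // type-pun the bits back to the user type
+ *         return ret;
+ *     }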
+ */ + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_COMPILER_ATOMIC_HAS_8BIT +#define EASTL_COMPILER_ATOMIC_HAS_16BIT +#define EASTL_COMPILER_ATOMIC_HAS_32BIT +#define EASTL_COMPILER_ATOMIC_HAS_64BIT + +#if EA_PLATFORM_PTR_SIZE == 8 + #define EASTL_COMPILER_ATOMIC_HAS_128BIT +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 uint8_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 uint16_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 uint32_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 uint64_t +#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 __uint128_t + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + retIntegral = fetchIntrinsic(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), valIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, weak, successOrder, failOrder) \ + ret = __atomic_compare_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + EASTL_ATOMIC_INTEGRAL_CAST(integralType, (expected)), \ + EASTL_ATOMIC_INTEGRAL_CAST(integralType, &(desired)), \ + weak, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + __atomic_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + &valIntegral, &retIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "compiler_gcc_fetch_add.h" +#include "compiler_gcc_fetch_sub.h" + +#include "compiler_gcc_fetch_and.h" +#include "compiler_gcc_fetch_xor.h" +#include "compiler_gcc_fetch_or.h" + +#include "compiler_gcc_add_fetch.h" +#include "compiler_gcc_sub_fetch.h" + +#include "compiler_gcc_and_fetch.h" +#include "compiler_gcc_xor_fetch.h" +#include "compiler_gcc_or_fetch.h" + +#include "compiler_gcc_exchange.h" + +#include "compiler_gcc_cmpxchg_weak.h" +#include "compiler_gcc_cmpxchg_strong.h" + +#include "compiler_gcc_load.h" +#include "compiler_gcc_store.h" + +#include "compiler_gcc_barrier.h" + +#include "compiler_gcc_cpu_pause.h" + +#include "compiler_gcc_signal_fence.h" + +#include "compiler_gcc_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h new file mode 100644 index 00000000..1d19196b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_ADD_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_add_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h new file mode 100644 index 00000000..a35307f0 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_AND_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_and_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h new file mode 100644 index 00000000..64e8e541 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h @@ -0,0 +1,30 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \ + __asm__ __volatile__ ("" ::: "memory") + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \ + __asm__ __volatile__ ("" : /* Output Operands */ : "r"(&(val)) : "memory") + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h new file mode 100644 index 00000000..3e47cf2e --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h @@ -0,0 +1,182 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, false, successOrder, failOrder) + + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, 
expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) 
\ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h new file mode 100644 index 00000000..f55fe3a3 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h @@ -0,0 +1,182 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, true, successOrder, failOrder) + + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h new file mode 100644 index 00000000..9d4ac35e --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h @@ -0,0 +1,31 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
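For context on the two cmpxchg families just vendored: the _WEAK variants differ from the _STRONG ones in compiler_gcc_cmpxchg_strong.h only in the boolean passed through EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N (true vs. false), i.e. the `weak` flag of GCC's __atomic_compare_exchange builtin. A weak compare-exchange may fail spuriously, so it belongs in a retry loop where a spurious failure is absorbed anyway. A minimal sketch of such a loop against the raw builtin; the function and variable names are hypothetical, for illustration only:

    #include <cstdint>

    // Atomically doubles *counter; returns the value observed before the update.
    inline uint32_t atomic_double(uint32_t* counter) {
        uint32_t expected = __atomic_load_n(counter, __ATOMIC_RELAXED);
        // A failed weak CAS refreshes `expected` with the current value,
        // so the loop recomputes the desired value and simply tries again.
        while (!__atomic_compare_exchange_n(counter, &expected, expected * 2u,
                                            /*weak=*/true,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
        }
        return expected;
    }
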
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_PAUSE() +// +#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \ + __asm__ __volatile__ ("pause") + +#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \ + __asm__ __volatile__ ("yield") + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h new file mode 100644 index 00000000..a3325547 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_EXCHANGE_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define 
EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h new file mode 100644 index 00000000..98abbb83 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
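The exchange wrappers above, together with the cpu-pause header earlier in this diff, are the ingredients of a test-and-test-and-set spinlock. A toy sketch over the raw builtins (the `SpinLock` type is hypothetical; EASTL_COMPILER_ATOMIC_CPU_PAUSE() expands to "pause" on x86 and "yield" on ARM, which the x86 branch below mirrors):

    #include <cstdint>

    struct SpinLock {
        uint32_t state = 0; // 0 = unlocked, 1 = locked

        void lock() {
            // __atomic_exchange_n returns the previous value; we own the lock
            // once we observe 0. While contended, spin on a cheap relaxed load.
            while (__atomic_exchange_n(&state, 1u, __ATOMIC_ACQUIRE) != 0u) {
                while (__atomic_load_n(&state, __ATOMIC_RELAXED) != 0u) {
    #if defined(__x86_64__) || defined(__i386__)
                    __asm__ __volatile__("pause"); // same hint the header emits
    #endif
                }
            }
        }

        void unlock() { __atomic_store_n(&state, 0u, __ATOMIC_RELEASE); }
    };
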
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_ADD_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_add, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h new file mode 100644 index 00000000..0dfb81db --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
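Note the naming asymmetry in this port: __atomic_fetch_add, wrapped above, returns the value *before* the addition, the mirror image of the *_AND_FETCH family earlier in this diff, which returns the post-operation value. The returned old value is exactly what a monotonic ID allocator needs; a minimal sketch with hypothetical names:

    #include <cstdint>

    inline uint64_t next_id(uint64_t* counter) {
        // Relaxed suffices when the ID value itself carries no synchronization.
        return __atomic_fetch_add(counter, 1u, __ATOMIC_RELAXED);
    }
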
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_AND_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_and, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h new file mode 100644 index 00000000..ba259b74 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
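fetch_and, just wrapped above, atomically clears bits while reporting the prior word, which is what "did I clear this bit?" checks need. An illustrative sketch with hypothetical names:

    #include <cstdint>

    inline bool take_flag(uint32_t* flags, uint32_t bit) {
        uint32_t previous = __atomic_fetch_and(flags, ~bit, __ATOMIC_ACQ_REL);
        return (previous & bit) != 0u; // true iff this caller cleared the bit
    }
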
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_OR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_or, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + 
EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h new file mode 100644 index 00000000..c8be225e --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
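The fetch_or wrappers above support the dual idiom: set a bit and learn whether it was already set, giving a cheap one-shot guard. An illustrative sketch with a hypothetical name:

    #include <cstdint>

    inline bool claim_once(uint32_t* once_flag) {
        // Exactly one caller observes 0 in the returned previous value.
        return __atomic_fetch_or(once_flag, 1u, __ATOMIC_ACQ_REL) == 0u;
    }
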
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_SUB_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_sub, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h new file mode 100644 index 00000000..4ec6d676 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
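fetch_sub with release ordering is the standard reference-count decrement: every releasing thread publishes its writes, and only the thread that drops the count to zero pays for an acquire fence before tearing the object down. A minimal sketch against the builtin wrapped above (`destroy` is a hypothetical cleanup hook):

    #include <cstdint>

    inline void release_ref(uint32_t* refcount, void (*destroy)()) {
        if (__atomic_fetch_sub(refcount, 1u, __ATOMIC_RELEASE) == 1u) {
            __atomic_thread_fence(__ATOMIC_ACQUIRE); // observe all prior releases
            destroy();
        }
    }
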
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_XOR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_xor, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h new file mode 100644 index 00000000..a4a3ebf1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
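The compiler_gcc_load.h header that follows does not call __atomic_load on the user's type directly; it loads the object representation into an unsigned integral temporary of the same width and type-puns the result back via EASTL_ATOMIC_TYPE_PUN_CAST. A condensed sketch of the same pattern for the 32-bit case, with EASTL's pun cast and volatile integral cast simplified to a reinterpret_cast plus memcpy (names are simplified, not EASTL's):

    #include <cstdint>
    #include <cstring>

    template <typename T>
    T atomic_load_acquire(T* ptr) {
        static_assert(sizeof(T) == sizeof(uint32_t), "sketch covers 32-bit types");
        uint32_t bits;
        __atomic_load(reinterpret_cast<uint32_t*>(ptr), &bits, __ATOMIC_ACQUIRE);
        T result;
        std::memcpy(&result, &bits, sizeof(T)); // the type-pun step
        return result;
    }
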
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_LOAD_N(integralType, type, ret, ptr, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + __atomic_load(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &retIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint8_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint16_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint32_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint64_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(__uint128_t, type, ret, ptr, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h 
b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h new file mode 100644 index 00000000..9e4db3e1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_OR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_or_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, 
__ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h new file mode 100644 index 00000000..16dff14f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_SIGNAL_FENCE(gccMemoryOrder) \ + __atomic_signal_fence(gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h new file mode 100644 index 00000000..04a28ac4 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h @@ -0,0 +1,89 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_STORE_N(integralType, ptr, val, gccMemoryOrder) \ + { \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + __atomic_store(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &valIntegral, gccMemoryOrder); \ + } + + +#define EASTL_GCC_ATOMIC_STORE_8(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint8_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_16(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint16_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_32(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint32_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_64(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint64_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_128(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(__uint128_t, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, 
__ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h new file mode 100644 index 00000000..62f8cd91 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_SUB_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_sub_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h new file mode 100644 index 00000000..0dd005e4 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_THREAD_FENCE(gccMemoryOrder) \ + __atomic_thread_fence(gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h new file mode 100644 index 00000000..4827d79f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_XOR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_xor_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, 
val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h new file mode 100644 index 00000000..90901eec --- /dev/null +++ 
b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h
@@ -0,0 +1,259 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <intrin.h>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_HAS_8BIT
+#define EASTL_COMPILER_ATOMIC_HAS_16BIT
+#define EASTL_COMPILER_ATOMIC_HAS_32BIT
+#define EASTL_COMPILER_ATOMIC_HAS_64BIT
+
+#if EA_PLATFORM_PTR_SIZE == 8
+    #define EASTL_COMPILER_ATOMIC_HAS_128BIT
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 char
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 short
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 long
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 __int64
+
+namespace eastl
+{
+
+namespace internal
+{
+
+struct FixedWidth128
+{
+    __int64 value[2];
+};
+
+} // namespace internal
+
+} // namespace eastl
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 eastl::internal::FixedWidth128
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * Unfortunately, MSVC intrinsics depend on the architecture we are compiling
+ * for. These are some indirection macros to make our lives easier and to
+ * ensure the least possible amount of copy-paste, reducing programmer errors.
+ *
+ * All compiler implementations end up deferring to the macros below.
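+ *
+ * A rough sketch of what this indirection buys us (an editor's illustration,
+ * not part of the EASTL sources): on ARM the memory order is encoded in the
+ * intrinsic's name, so with the macros below,
+ *
+ *     EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, ACQUIRE, _InterlockedExchangeAdd);
+ *
+ * expands to ret = _InterlockedExchangeAdd_acq(ptr, val), whereas on x86/x64
+ * the same line expands to plain ret = _InterlockedExchangeAdd(ptr, val),
+ * because locked x86 instructions already provide sequentially consistent
+ * semantics.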
+ */ +#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) + + + #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, val) + + #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, val) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, exchange, comparand) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \ + ret = _InterlockedCompareExchange128_np(ptr, exchangeHigh, exchangeLow, comparandResult) + + +#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) + + + #define EASTL_MSVC_INTRINSIC_RELAXED(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _nf) + + #define EASTL_MSVC_INTRINSIC_ACQUIRE(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _acq) + + #define EASTL_MSVC_INTRINSIC_RELEASE(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _rel) + + #define EASTL_MSVC_INTRINSIC_ACQ_REL(Intrinsic) \ + Intrinsic + + #define EASTL_MSVC_INTRINSIC_SEQ_CST(Intrinsic) \ + Intrinsic + + + #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val) + + #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, exchange, comparand) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(_InterlockedCompareExchange128)(ptr, exchangeHigh, exchangeLow, comparandResult) + + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_MSVC_NOP_POST_INTRIN_COMPUTE(ret, lhs, rhs) + +#define EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE(ret, val) \ + ret = (val) + + +#define EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \ + { \ + integralType retIntegral; \ + type valCompute; \ + \ + PRE_INTRIN_COMPUTE(valCompute, (val)); \ + const integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, valCompute); \ + \ + EASTL_MSVC_ATOMIC_FETCH_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + valIntegral, MemoryOrder, fetchIntrinsic); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + POST_INTRIN_COMPUTE(ret, ret, (val)); \ + } + +#define EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \ + { \ + integralType retIntegral; \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)), MemoryOrder, \ + exchangeIntrinsic); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \ + { \ + integralType comparandIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, *(expected)); \ + integralType oldIntegral; \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(oldIntegral, 
EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+                                            comparandIntegral, EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (desired)), \
+                                            MemoryOrder, cmpxchgStrongIntrinsic); \
+        \
+        if (oldIntegral == comparandIntegral) \
+        { \
+            ret = true; \
+        } \
+        else \
+        { \
+            *(expected) = EASTL_ATOMIC_TYPE_PUN_CAST(type, oldIntegral); \
+            ret = false; \
+        } \
+    }
+
+/**
+ * The wording in the Microsoft docs is a little confusing:
+ * ExchangeHigh means the top 8 bytes, i.e. (ptr + 8).
+ * ExchangeLow means the low 8 bytes, i.e. (ptr).
+ * In the macro below, the union views the 16-byte desired value as two __int64
+ * halves, so value[1] is passed as ExchangeHigh and value[0] as ExchangeLow.
+ * Endianness does not matter since we are just loading and comparing data,
+ * much like memcpy() and memcmp() calls, where the layout of the data itself
+ * is irrelevant.
+ * Only after we type pun back to the original type, and load from memory, does
+ * the layout of the data matter again.
+ */
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) \
+    { \
+        union TypePun \
+        { \
+            type templateType; \
+            \
+            struct exchange128 \
+            { \
+                __int64 value[2]; \
+            }; \
+            \
+            struct exchange128 exchangePun; \
+        }; \
+        \
+        union TypePun typePun = { (desired) }; \
+        \
+        unsigned char cmpxchgRetChar; \
+        cmpxchgRetChar = EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(cmpxchgRetChar, EASTL_ATOMIC_VOLATILE_TYPE_CAST(__int64, (ptr)), \
+                                                                 EASTL_ATOMIC_TYPE_CAST(__int64, (expected)), \
+                                                                 typePun.exchangePun.value[1], typePun.exchangePun.value[0], \
+                                                                 MemoryOrder); \
+        \
+        ret = static_cast<bool>(cmpxchgRetChar); \
+    }
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE) \
+    EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, EASTL_MSVC_NOP_POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
+    EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
+    EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
+    EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) \
+    EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_msvc_fetch_add.h"
+#include "compiler_msvc_fetch_sub.h"
+
+#include "compiler_msvc_fetch_and.h"
+#include "compiler_msvc_fetch_xor.h"
+#include "compiler_msvc_fetch_or.h"
+
+#include "compiler_msvc_add_fetch.h"
+#include "compiler_msvc_sub_fetch.h"
+
+#include "compiler_msvc_and_fetch.h"
+#include "compiler_msvc_xor_fetch.h"
+#include "compiler_msvc_or_fetch.h"
+
+#include "compiler_msvc_exchange.h"
+
+#include "compiler_msvc_cmpxchg_weak.h"
+#include "compiler_msvc_cmpxchg_strong.h"
+
+#include "compiler_msvc_barrier.h"
+
+#include "compiler_msvc_cpu_pause.h"
+
+#include
"compiler_msvc_signal_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h new file mode 100644 index 00000000..12fc4b04 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE(ret, val, addend) \ + ret = (val) + (addend) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h new file mode 100644 index 00000000..70ec577f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h @@ -0,0 +1,121 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd_np + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64_np + +#else + + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8 + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16 + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd + #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64 + +#endif + + +#define EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE(ret, val, andend) \ + ret = (val) & (andend) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(char, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(short, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(long, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(__int64, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, 
ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+    EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
new file mode 100644
index 00000000..90b78a65
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
@@ -0,0 +1,33 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
+    EA_DISABLE_CLANG_WARNING(-Wdeprecated-declarations) \
+    _ReadWriteBarrier() \
+    EA_RESTORE_CLANG_WARNING()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+    EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(const_cast<type*>(eastl::addressof((val)))); \
+    EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H */
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
new file mode 100644
index 00000000..8217f232
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
@@ -0,0 +1,194 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16_np + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange_np + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64_np + +#else + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16 + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64 + +#endif + + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(__int64, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ 
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + 
EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + 
EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h new file mode 100644 index 00000000..8f4147ac --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h @@ -0,0 +1,162 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, 
expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+	EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H */
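Note on the file above: every `CMPXCHG_WEAK` macro forwards to its `CMPXCHG_STRONG` counterpart because MSVC's `_InterlockedCompareExchange` family only exposes a strong compare-exchange, one that never fails spuriously. That substitution is always safe: a strong CAS satisfies the weak contract, and callers of the weak form must already retry in a loop. A minimal sketch of such a caller, written against plain `std::atomic` rather than EASTL:

```cpp
#include <atomic>

// Canonical weak-CAS retry loop. It stays correct whether the underlying
// CAS is weak (may fail spuriously) or strong (fails only on a real mismatch),
// which is why the weak->strong mapping above is valid.
void add_one(std::atomic<int>& counter)
{
    int observed = counter.load(std::memory_order_relaxed);
    while (!counter.compare_exchange_weak(observed, observed + 1,
                                          std::memory_order_acq_rel,
                                          std::memory_order_relaxed))
    {
        // On failure, observed was refreshed with the current value; retry.
    }
}
```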
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
new file mode 100644
index 00000000..5f436b8b
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
@@ -0,0 +1,22 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+	#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() _mm_pause()
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+	#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() __yield()
+#else
+	#error Unsupported CPU architecture for EASTL_COMPILER_ATOMIC_CPU_PAUSE
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H */
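`EASTL_COMPILER_ATOMIC_CPU_PAUSE()` maps to `_mm_pause()` on x86/x64 and `__yield()` on ARM. Both are hints that the core is busy-waiting, letting it throttle speculation and share execution resources with a sibling hardware thread. A hedged sketch of the usual call site, a spinlock, with `_mm_pause()` hard-coded (so it assumes an x86 target; this is not EASTL code):

```cpp
#include <atomic>
#include <immintrin.h> // _mm_pause; assumes an x86/x64 target

void spin_lock(std::atomic_flag& flag)
{
    // test_and_set returns the previous value; true means another thread
    // already holds the lock, so we spin.
    while (flag.test_and_set(std::memory_order_acquire))
    {
        _mm_pause(); // the hint EASTL_COMPILER_ATOMIC_CPU_PAUSE() expands to on x86
    }
}

void spin_unlock(std::atomic_flag& flag)
{
    flag.clear(std::memory_order_release);
}
```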
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
new file mode 100644
index 00000000..323f1fae
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
@@ -0,0 +1,125 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(char, _InterlockedExchange8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(short, _InterlockedExchange16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long, _InterlockedExchange, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(__int64, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+	{ \
+		bool cmpxchgRet; \
+		/* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+		/* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \
+		/* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \
+		/* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. */ \
+		ret = *(ptr); \
+		do \
+		{ \
+			EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
+		} while (!cmpxchgRet); \
+	}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H */
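The 128-bit case in this file is worth restating outside the macro: MSVC has no 128-bit exchange intrinsic, so `EASTL_MSVC_ATOMIC_EXCHANGE_128` synthesizes one from the strong 128-bit compare-exchange, seeded by an optimistic, possibly torn initial read. A minimal sketch of the same loop in portable C++, using a relaxed atomic load as a stand-in for the deliberately non-atomic read; `Value128` and `exchange128` are illustrative names, not EASTL API:

```cpp
#include <atomic>

struct Value128 { unsigned long long lo, hi; }; // illustrative 16-byte payload

// exchange128: store `desired`, return the value that was replaced.
Value128 exchange128(std::atomic<Value128>& obj, Value128 desired)
{
    // A stale or torn initial value only makes the first CAS attempt fail,
    // which refreshes `observed` with the real contents of `obj`.
    Value128 observed = obj.load(std::memory_order_relaxed);
    while (!obj.compare_exchange_weak(observed, desired))
    {
        // `observed` now holds what is actually in `obj`; try again.
    }
    return observed;
}
```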
diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
new file mode 100644
index 00000000..a951740e
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
@@ -0,0 +1,101 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \
+								 EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, MemoryOrder) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+	EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret,
ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h new file mode 100644 index 00000000..96f78942 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd_np + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8 + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16 + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd + #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64 + +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_AND_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(char, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(short, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(long, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(__int64, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, 
ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h new file mode 100644 index 00000000..2792fc3d --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr_np + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8 + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16 + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr + #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64 + +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_OR_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(char, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(short, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h new file mode 100644 index 00000000..6d5d9e3a --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE(ret, val) \ + ret = EASTL_ATOMIC_NEGATE_OPERAND((val)) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h new file mode 100644 index 00000000..371153e9 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor_np + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64_np + +#else + + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8 + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16 + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor + #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64 + +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(__int64, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, 
ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h new file mode 100644 index 00000000..c5b5fac3 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h @@ -0,0 +1,121 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr_np + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64_np + +#else + + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8 + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16 + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr + #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64 + +#endif + + +#define EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE(ret, val, orend) \ + ret = (val) | (orend) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(char, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(short, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(long, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h new file mode 100644 index 00000000..f35f5772 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h new file mode 100644 index 00000000..6fb61e29 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h @@ -0,0 +1,107 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE(ret, val) \ + ret = EASTL_ATOMIC_NEGATE_OPERAND((val)) + +#define EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE(ret, val, subend) \ + ret = (val) - (subend) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE, EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h new file mode 100644 index 00000000..44ffff90 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h @@ -0,0 +1,121 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#if defined(EA_PROCESSOR_X86_64) + + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor_np + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64_np + +#else + + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8 + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16 + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor + #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64 + +#endif + + +#define EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE(ret, val, xorend) \ + ret = (val) ^ (xorend) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// 
void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H */ diff --git a/external/EASTL/include/EASTL/internal/char_traits.h b/external/EASTL/include/EASTL/internal/char_traits.h new file mode 100644 index 00000000..2c7b81ef --- /dev/null +++ b/external/EASTL/include/EASTL/internal/char_traits.h @@ -0,0 +1,582 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements similar functionality to char_traits which is part of
+// the C++ standard STL library specification. This is intended for internal
+// EASTL use only. Functionality can be accessed through the eastl::string or
+// eastl::string_view types.
+//
+// http://en.cppreference.com/w/cpp/string/char_traits
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_CHAR_TRAITS_H
+#define EASTL_CHAR_TRAITS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <ctype.h>   // toupper, etc.
+#include <string.h>  // memset, etc.
+EA_RESTORE_ALL_VC_WARNINGS()
+
+namespace eastl
+{
+    namespace details
+    {
+#if defined(EA_COMPILER_CPP17_ENABLED)
+        // Helper to detect if wchar_t is the native type for the current platform or if -fshort-wchar was used.
+        // When that flag is used all string builtins and C Standard Library functions are not usable.
+        constexpr bool UseNativeWideChar()
+        {
+#if defined(EA_COMPILER_MSVC)
+            return true; // Irrelevant flag for windows.
+#elif defined(EA_PLATFORM_SONY) && defined(EA_PLATFORM_POSIX) && defined(EA_PLATFORM_CONSOLE)
+            return true; // Sony consoles use short wchar_t disregarding the flag.
+#elif defined(EA_PLATFORM_POSIX) || defined(EA_PLATFORM_UNIX)
+            return sizeof(wchar_t) == 4;
+#endif
+        }
+#endif
+    }
+
+    ///////////////////////////////////////////////////////////////////////////////
+    /// DecodePart
+    ///
+    /// These implement UTF8/UCS2/UCS4 encoding/decoding.
+    ///
+    EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char*& pDest, char* pDestEnd);
+    EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+    EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+    EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char*& pDest, char* pDestEnd);
+    EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+    EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+    EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char*& pDest, char* pDestEnd);
+    EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+    EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+    EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char*& pDest, char* pDestEnd);
+    EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+    EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+    #if EA_CHAR8_UNIQUE
+        bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+
+        bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd);
+        bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+        bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+        bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+        bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+        bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+    #endif
+
+    #if EA_WCHAR_UNIQUE
+        bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+
+        bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd);
+        bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+        bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+        bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+        bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+        bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+    #endif
+
+    #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+        bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+        bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+    #endif
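A minimal usage sketch (illustrative only; the buffer names are hypothetical and the exact success semantics are defined by the implementations below). DecodePart consumes from the source range and writes to the destination range, advancing both lead pointers as it converts:

    // Convert UTF-8 bytes to UTF-32 code points (sketch).
    const char* src    = "quaesar";
    const char* srcEnd = src + 7;
    char32_t    buf[8];
    char32_t*   dst    = buf;
    bool ok = eastl::DecodePart(src, srcEnd, dst, buf + 8);
    // Afterwards 'src' points past the consumed bytes and 'dst' past the
    // written code points; the return value reports decode success.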
+
+
+    #if EA_WCHAR_UNIQUE
+        inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+        {
+            return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        }
+
+        inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+        #endif
+        }
+
+        inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+        #endif
+        }
+
+        inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+        #endif
+        }
+
+        inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+        #endif
+        }
+
+        inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+        #endif
+        }
+
+        inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+        #endif
+        }
+    #endif
+
+    #if EA_CHAR8_UNIQUE
+        inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+        {
+            return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        }
+
+        inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd)
+        {
+            return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+        }
+
+        inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+        {
+            return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+        }
+
+        inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+        {
+            return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+        }
+
+        inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+        {
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        }
+
+        inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+        {
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        }
+
+        inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+        {
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        }
+    #endif
+
+    #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+        inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+        #endif
+        }
+
+        inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+        {
+        #if (EA_WCHAR_SIZE == 2)
+            return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        #elif (EA_WCHAR_SIZE == 4)
+            return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+        #endif
+        }
+    #endif
+
+    ///////////////////////////////////////////////////////////////////////////////
+    // 'char traits' functionality
+    //
+    inline char CharToLower(char c)
+        { return (char)tolower((uint8_t)c); }
+
+    template <typename T>
+    inline T CharToLower(T c)
+        { if((unsigned)c <= 0xff) return (T)tolower((uint8_t)c); return c; }
+
+
+    inline char CharToUpper(char c)
+        { return (char)toupper((uint8_t)c); }
+
+    template <typename T>
+    inline T CharToUpper(T c)
+        { if((unsigned)c <= 0xff) return (T)toupper((uint8_t)c); return c; }
+
+
+    template <typename T>
+    int Compare(const T* p1, const T* p2, size_t n)
+    {
+        for(; n > 0; ++p1, ++p2, --n)
+        {
+            if(*p1 != *p2)
+                return (static_cast<typename make_unsigned<T>::type>(*p1) <
+                        static_cast<typename make_unsigned<T>::type>(*p2)) ? -1 : 1;
+        }
+        return 0;
+    }
+
+#if defined(EA_COMPILER_CPP17_ENABLED)
+    // All main compilers offer a constexpr __builtin_memcmp as soon as C++17 was available.
+    constexpr int Compare(const char* p1, const char* p2, size_t n) { return __builtin_memcmp(p1, p2, n); }
+
+#if !defined(EA_COMPILER_GNUC)
+    // GCC doesn't offer __builtin_wmemcmp.
+    constexpr int Compare(const wchar_t* p1, const wchar_t* p2, size_t n)
+    {
+        if constexpr (details::UseNativeWideChar())
+            return __builtin_wmemcmp(p1, p2, n);
+        else
+            return Compare<wchar_t>(p1, p2, n);
+    }
+#endif // !defined(EA_COMPILER_GNUC)
+#else
+    inline int Compare(const char* p1, const char* p2, size_t n)
+    {
+        if (n > 0)
+            return memcmp(p1, p2, n);
+        else
+            return 0;
+    }
+#endif
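A brief usage sketch (values illustrative; CompareI is defined just below): Compare is a memcmp-style three-way comparison over any character type, while CompareI lower-cases each character before comparing.

    const char a[] = "Hello";
    const char b[] = "heLLo";
    int caseSensitive   = eastl::Compare(a, b, 5);   // nonzero: 'H' != 'h'
    int caseInsensitive = eastl::CompareI(a, b, 5);  // 0: equal ignoring case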
+
+    template <typename T>
+    inline int CompareI(const T* p1, const T* p2, size_t n)
+    {
+        for(; n > 0; ++p1, ++p2, --n)
+        {
+            const T c1 = CharToLower(*p1);
+            const T c2 = CharToLower(*p2);
+
+            if(c1 != c2)
+                return (static_cast<typename make_unsigned<T>::type>(c1) <
+                        static_cast<typename make_unsigned<T>::type>(c2)) ? -1 : 1;
+        }
+        return 0;
+    }
+
+
+    template <typename T>
+    inline EA_CPP14_CONSTEXPR const T* Find(const T* p, T c, size_t n)
+    {
+        for(; n > 0; --n, ++p)
+        {
+            if(*p == c)
+                return p;
+        }
+
+        return nullptr;
+    }
+
+#if defined(EA_COMPILER_CPP17_ENABLED) && defined(EA_COMPILER_CLANG)
+    // Only clang has __builtin_char_memchr.
+    // __builtin_memchr doesn't work in a constexpr context since we need to cast the returned void* to a char*.
+    inline constexpr const char* Find(const char* p, char c, size_t n)
+    {
+        return __builtin_char_memchr(p, c, n);
+    }
+#else
+    inline const char* Find(const char* p, char c, size_t n)
+    {
+        return (const char*)memchr(p, c, n);
+    }
+#endif
+
+    template <typename T>
+    inline EA_CPP14_CONSTEXPR size_t CharStrlen(const T* p)
+    {
+        const auto* pCurrent = p;
+        while (*pCurrent)
+            ++pCurrent;
+        return (size_t)(pCurrent - p);
+    }
+
+#if defined(EA_COMPILER_CPP17_ENABLED) && !defined(EA_COMPILER_GNUC)
+    // So far, GCC seems to struggle with builtin_strlen: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70816
+    // MSVC and Clang support both builtins as soon as C++17 was available.
+    constexpr size_t CharStrlen(const char* p) { return __builtin_strlen(p); }
+
+    constexpr size_t CharStrlen(const wchar_t* p)
+    {
+        if constexpr (details::UseNativeWideChar())
+            return __builtin_wcslen(p);
+        else
+            return CharStrlen<wchar_t>(p);
+    }
+#endif
+
+    // If either pDestination or pSource is an invalid or null pointer, the behavior is undefined, even if (pSourceEnd - pSource) is zero.
+    template <typename T>
+    inline T* CharStringUninitializedCopy(const T* pSource, const T* pSourceEnd, T* pDestination)
+    {
+        memmove(pDestination, pSource, (size_t)(pSourceEnd - pSource) * sizeof(T));
+        return pDestination + (pSourceEnd - pSource);
+    }
+
+
+    // CharTypeStringFindEnd
+    // Specialized char version of STL find() from back function.
+    // Not the same as RFind because search range is specified as forward iterators.
+    template <typename T>
+    const T* CharTypeStringFindEnd(const T* pBegin, const T* pEnd, T c)
+    {
+        const T* pTemp = pEnd;
+        while(--pTemp >= pBegin)
+        {
+            if(*pTemp == c)
+                return pTemp;
+        }
+
+        return pEnd;
+    }
+
+
+    // CharTypeStringSearch
+    // Specialized value_type version of STL search() function.
+    // Purpose: find p2 within p1. Return p1End if not found or if either string is zero length.
+    template <typename T>
+    const T* CharTypeStringSearch(const T* p1Begin, const T* p1End,
+                                  const T* p2Begin, const T* p2End)
+    {
+        // Test for zero length strings, in which case we have a match or a failure,
+        // but the return value is the same either way.
+        if((p1Begin == p1End) || (p2Begin == p2End))
+            return p1Begin;
+
+        // Test for a pattern of length 1.
+        if((p2Begin + 1) == p2End)
+            return eastl::find(p1Begin, p1End, *p2Begin);
+
+        // General case.
+        const T* pTemp;
+        const T* pTemp1 = (p2Begin + 1);
+        const T* pCurrent = p1Begin;
+
+        while(p1Begin != p1End)
+        {
+            p1Begin = eastl::find(p1Begin, p1End, *p2Begin);
+            if(p1Begin == p1End)
+                return p1End;
+
+            pTemp = pTemp1;
+            pCurrent = p1Begin;
+            if(++pCurrent == p1End)
+                return p1End;
+
+            while(*pCurrent == *pTemp)
+            {
+                if(++pTemp == p2End)
+                    return p1Begin;
+                if(++pCurrent == p1End)
+                    return p1End;
+            }
+
+            ++p1Begin;
+        }
+
+        return p1Begin;
+    }
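A usage sketch for CharTypeStringSearch (buffers illustrative; lengths exclude the null terminators):

    const char hay[]    = "find the needle here";  // 20 characters
    const char needle[] = "needle";                // 6 characters
    const char* pos = eastl::CharTypeStringSearch(hay, hay + 20, needle, needle + 6);
    // pos now points at the 'n' of "needle"; it would equal hay + 20 if absent.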
+
+
+    // CharTypeStringRSearch
+    // Specialized value_type version of STL find_end() function (which really is a reverse search function).
+    // Purpose: find last instance of p2 within p1. Return p1End if not found or if either string is zero length.
+    template <typename T>
+    const T* CharTypeStringRSearch(const T* p1Begin, const T* p1End,
+                                   const T* p2Begin, const T* p2End)
+    {
+        // Test for zero length strings, in which case we have a match or a failure,
+        // but the return value is the same either way.
+        if((p1Begin == p1End) || (p2Begin == p2End))
+            return p1Begin;
+
+        // Test for a pattern of length 1.
+        if((p2Begin + 1) == p2End)
+            return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin);
+
+        // Test for search string length being longer than string length.
+        if((p2End - p2Begin) > (p1End - p1Begin))
+            return p1End;
+
+        // General case.
+        const T* pSearchEnd = (p1End - (p2End - p2Begin) + 1);
+
+        const T* pMatchCandidate;
+        while((pMatchCandidate = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin)) != pSearchEnd)
+        {
+            // In this case, *pMatchCandidate == *p2Begin. So compare the rest.
+            const T* pCurrent1 = pMatchCandidate;
+            const T* pCurrent2 = p2Begin;
+            while(*pCurrent1++ == *pCurrent2++)
+            {
+                if(pCurrent2 == p2End)
+                    return (pCurrent1 - (p2End - p2Begin));
+            }
+
+            // This match failed, search again with this new end.
+            pSearchEnd = pMatchCandidate;
+        }
+
+        return p1End;
+    }
+
+
+    // CharTypeStringFindFirstOf
+    // Specialized value_type version of STL find_first_of() function.
+    // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+    template <typename T>
+    inline const T* CharTypeStringFindFirstOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+    {
+        for (; p1Begin != p1End; ++p1Begin)
+        {
+            for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+            {
+                if (*p1Begin == *pTemp)
+                    return p1Begin;
+            }
+        }
+        return p1End;
+    }
+
+
+    // CharTypeStringRFindFirstNotOf
+    // Specialized value_type version of STL find_first_not_of() function in reverse.
+    template <typename T>
+    inline const T* CharTypeStringRFindFirstNotOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+    {
+        for (; p1RBegin != p1REnd; --p1RBegin)
+        {
+            const T* pTemp;
+            for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+            {
+                if (*(p1RBegin - 1) == *pTemp)
+                    break;
+            }
+            if (pTemp == p2End)
+                return p1RBegin;
+        }
+        return p1REnd;
+    }
+
+
+    // CharTypeStringFindFirstNotOf
+    // Specialized value_type version of STL find_first_not_of() function.
+    template <typename T>
+    inline const T* CharTypeStringFindFirstNotOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+    {
+        for (; p1Begin != p1End; ++p1Begin)
+        {
+            const T* pTemp;
+            for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+            {
+                if (*p1Begin == *pTemp)
+                    break;
+            }
+            if (pTemp == p2End)
+                return p1Begin;
+        }
+        return p1End;
+    }
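A usage sketch for the find_first_of-style helpers (buffers illustrative):

    const char text[]   = "strength";  // 8 characters
    const char vowels[] = "aeiou";     // 5 characters
    const char* v = eastl::CharTypeStringFindFirstOf(text, text + 8, vowels, vowels + 5);
    // v points at the 'e'; it would equal text + 8 if no vowel occurred.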
+
+
+    // CharTypeStringRFindFirstOf
+    // Specialized value_type version of STL find_first_of() function in reverse.
+    // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+    template <typename T>
+    inline const T* CharTypeStringRFindFirstOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+    {
+        for (; p1RBegin != p1REnd; --p1RBegin)
+        {
+            for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+            {
+                if (*(p1RBegin - 1) == *pTemp)
+                    return p1RBegin;
+            }
+        }
+        return p1REnd;
+    }
+
+
+    // CharTypeStringRFind
+    // Specialized value_type version of STL find() function in reverse.
+    template <typename T>
+    inline const T* CharTypeStringRFind(const T* pRBegin, const T* pREnd, const T c)
+    {
+        while (pRBegin > pREnd)
+        {
+            if (*(pRBegin - 1) == c)
+                return pRBegin;
+            --pRBegin;
+        }
+        return pREnd;
+    }
+
+
+    inline char* CharStringUninitializedFillN(char* pDestination, size_t n, const char c)
+    {
+        if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+            memset(pDestination, (uint8_t)c, (size_t)n);
+        return pDestination + n;
+    }
+
+    template <typename T>
+    inline T* CharStringUninitializedFillN(T* pDestination, size_t n, const T c)
+    {
+        T * pDest = pDestination;
+        const T* const pEnd = pDestination + n;
+        while(pDest < pEnd)
+            *pDest++ = c;
+        return pDestination + n;
+    }
+
+
+    inline char* CharTypeAssignN(char* pDestination, size_t n, char c)
+    {
+        if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+            return (char*)memset(pDestination, c, (size_t)n);
+        return pDestination;
+    }
+
+    template <typename T>
+    inline T* CharTypeAssignN(T* pDestination, size_t n, T c)
+    {
+        T* pDest = pDestination;
+        const T* const pEnd = pDestination + n;
+        while(pDest < pEnd)
+            *pDest++ = c;
+        return pDestination;
+    }
+} // namespace eastl
+
+#endif // EASTL_CHAR_TRAITS_H
diff --git a/external/EASTL/include/EASTL/internal/concepts.h b/external/EASTL/include/EASTL/internal/concepts.h
new file mode 100644
index 00000000..eea184e1
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/concepts.h
@@ -0,0 +1,48 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_CONCEPTS_H
+#define EASTL_INTERNAL_CONCEPTS_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+    namespace internal
+    {
+        template <class From, class To>
+        using detect_explicitely_convertible = decltype(static_cast<To>(declval<From>()));
+
+        namespace concepts
+        {
+            template <class T>
+            constexpr bool destructible = is_nothrow_destructible_v<T>;
+
+            template <class T, class... Args>
+            constexpr bool constructible_from = destructible<T> && is_constructible_v<T, Args...>;
+
+            template <class From, class To>
+            constexpr bool convertible_to =
+                is_convertible_v<From, To> && is_detected_v<detect_explicitely_convertible, From, To>;
+
+            template <class T>
+            constexpr bool move_constructible = constructible_from<T, T> && convertible_to<T, T>;
+
+            template <class T>
+            constexpr bool copy_constructible =
+                move_constructible<T> && constructible_from<T, T&> && convertible_to<T&, T> &&
+                constructible_from<T, const T&> && convertible_to<const T&, T> && constructible_from<T, const T> &&
+                convertible_to<const T, T>;
+        } // namespace concepts
+    } // namespace internal
+} // namespace eastl
+
+#endif
\ No newline at end of file
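A sketch of how these variable templates read at a call site (the Widget type is hypothetical):

    #include <EASTL/internal/concepts.h>

    struct Widget { Widget(const Widget&) = default; };

    // copy_constructible bundles move-constructibility with construction and
    // conversion from T&, const T& and const T, mirroring std::copy_constructible.
    static_assert(eastl::internal::concepts::copy_constructible<Widget>, "Widget must be copyable");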
diff --git a/external/EASTL/include/EASTL/internal/config.h b/external/EASTL/include/EASTL/internal/config.h
new file mode 100644
index 00000000..6c13117b
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/config.h
@@ -0,0 +1,1839 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_CONFIG_H
+#define EASTL_INTERNAL_CONFIG_H
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ReadMe
+//
+// This is the EASTL configuration file. All configurable parameters of EASTL
+// are controlled through this file. However, all the settings here can be
+// manually overridden by the user. There are three ways for a user to override
+// the settings in this file:
+//
+//     - Simply edit this file.
+//     - Define EASTL_USER_CONFIG_HEADER.
+//     - Predefine individual defines (e.g. EASTL_ASSERT).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_USER_CONFIG_HEADER
+//
+// This allows the user to define a header file to be #included before the
+// EASTL config.h contents are compiled. A primary use of this is to override
+// the contents of this config.h file. Note that all the settings below in
+// this file are user-overridable.
+//
+// Example usage:
+//     #define EASTL_USER_CONFIG_HEADER "MyConfigOverrides.h"
+//     #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef EASTL_USER_CONFIG_HEADER
+    #include EASTL_USER_CONFIG_HEADER
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EABASE_DISABLED
+//
+// The user can disable EABase usage and manually supply the configuration
+// via defining EASTL_EABASE_DISABLED and defining the appropriate entities
+// globally or via the above EASTL_USER_CONFIG_HEADER.
+//
+// Example usage:
+//     #define EASTL_EABASE_DISABLED
+//     #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EABASE_DISABLED
+    #include <EABase/eabase.h>
+    #include <EABase/eaunits.h>
+#endif
+#include <EABase/eahave.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction here is that minor versions are defined as two
+// digit entities (e.g. ".03") instead of minimal digit entities (e.g. ".3").
+// The logic here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+//     "0.91.00"    // Major version 0, minor version 91, patch version 0.
+//     "1.00.00"    // Major version 1, minor and patch version 0.
+//     "3.10.02"    // Major version 3, minor version 10, patch version 02.
+//     "12.03.01"   // Major version 12, minor version 03, patch version 01.
+//
+// Example usage:
+//     printf("EASTL version: %s", EASTL_VERSION);
+//     printf("EASTL version: %d.%d.%d", EASTL_VERSION_N / 10000 % 100, EASTL_VERSION_N / 100 % 100, EASTL_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VERSION
+    #define EASTL_VERSION   "3.21.23"
+    #define EASTL_VERSION_N  32123
+#endif
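Because EASTL_VERSION_N packs major/minor/patch into one integer, version gates reduce to plain comparisons (a sketch; the feature being gated is hypothetical):

    // 32123 == 3 * 10000 + 21 * 100 + 23, i.e. version 3.21.23.
    #if EASTL_VERSION_N >= 32100
        // Code that relies on behavior introduced in EASTL 3.21.xx.
    #endif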
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_COMPILER_NO_STANDARD_CPP_LIBRARY for the case
+// of using EABase versions prior to the addition of its EA_COMPILER_NO_STANDARD_CPP_LIBRARY support.
+//
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+    #if defined(EA_PLATFORM_ANDROID)
+        // Disabled because EA's eaconfig/android_config/android_sdk packages currently
+        // don't support linking STL libraries. Perhaps we can figure out what linker arguments
+        // are needed for an app so we can manually specify them and then re-enable this code.
+        //
+        //#include <android/api-level.h>
+        //
+        //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation.
+            #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+        //#endif
+    #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_NOEXCEPT
+//
+// Defined as a macro. Provided here for backward compatibility with older
+// EABase versions prior to 2.00.40 that don't yet define it themselves.
+//
+#if !defined(EA_NOEXCEPT)
+    #define EA_NOEXCEPT
+    #define EA_NOEXCEPT_IF(predicate)
+    #define EA_NOEXCEPT_EXPR(expression) false
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_CPP14_CONSTEXPR
+//
+// Defined as constexpr when a C++14 compiler is present, and as nothing
+// when using a C++11 compiler.
+// C++14 relaxes the specification for constexpr such that it allows more
+// kinds of expressions. Since a C++11 compiler doesn't allow this, we need
+// to make a unique define for C++14 constexpr. This macro should be used only
+// when you are using it with code that specifically requires C++14 constexpr
+// functionality beyond the regular C++11 constexpr functionality.
+// http://en.wikipedia.org/wiki/C%2B%2B14#Relaxed_constexpr_restrictions
+//
+#if !defined(EA_CPP14_CONSTEXPR)
+    #if defined(EA_COMPILER_CPP14_ENABLED)
+        #define EA_CPP14_CONSTEXPR constexpr
+    #else
+        #define EA_CPP14_CONSTEXPR // not supported
+        #define EA_NO_CPP14_CONSTEXPR
+    #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL namespace
+//
+// We define this so that users that #include this config file can reference
+// these namespaces without seeing any other files that happen to use them.
+///////////////////////////////////////////////////////////////////////////////
+
+/// EA Standard Template Library
+namespace eastl
+{
+    // Intentionally empty.
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG
+//
+// Defined as an integer >= 0. Default is 1 for debug builds and 0 for
+// release builds. This define is also a master switch for the default value
+// of some other settings.
+//
+// Example usage:
+//     #if EASTL_DEBUG
+//         ...
+//     #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG
+    #if defined(EA_DEBUG) || defined(_DEBUG)
+        #define EASTL_DEBUG 1
+    #else
+        #define EASTL_DEBUG 0
+    #endif
+#endif
+
+// Developer debug. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_DEBUG
+    #define EASTL_DEV_DEBUG 0
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUGPARAMS_LEVEL
+//
+// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to
+// the allocator by default.
+// This value may be defined by the user ... if not it will default to 2 for
+// EA_DEBUG builds, otherwise 0.
+//
+// 0 - no debug information is passed through to allocator calls.
+// 1 - 'name' is passed through to allocator calls.
+// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls. +// +// This parameter mirrors the equivalent parameter in the CoreAllocator package. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_DEBUGPARAMS_LEVEL + #if EASTL_DEBUG + #define EASTL_DEBUGPARAMS_LEVEL 2 + #else + #define EASTL_DEBUGPARAMS_LEVEL 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DLL +// +// Defined as 0 or 1. The default is dependent on the definition of EA_DLL. +// If EA_DLL is defined, then EASTL_DLL is 1, else EASTL_DLL is 0. +// EA_DLL is a define that controls DLL builds within the EAConfig build system. +// EASTL_DLL controls whether EASTL is built and used as a DLL. +// Normally you wouldn't do such a thing, but there are use cases for such +// a thing, particularly in the case of embedding C++ into C# applications. +// +#ifndef EASTL_DLL + #if defined(EA_DLL) + #define EASTL_DLL 1 + #else + #define EASTL_DLL 0 + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_IF_NOT_DLL +// +// Utility to include expressions only for static builds. +// +#ifndef EASTL_IF_NOT_DLL + #if EASTL_DLL + #define EASTL_IF_NOT_DLL(x) + #else + #define EASTL_IF_NOT_DLL(x) x + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_API +// +// This is used to label functions as DLL exports under Microsoft platforms. +// If EA_DLL is defined, then the user is building EASTL as a DLL and EASTL's +// non-templated functions will be exported. EASTL template functions are not +// labelled as EASTL_API (and are thus not exported in a DLL build). This is +// because it's not possible (or at least unsafe) to implement inline templated +// functions in a DLL. +// +// Example usage of EASTL_API: +// EASTL_API int someVariable = 10; // Export someVariable in a DLL build. +// +// struct EASTL_API SomeClass{ // Export SomeClass and its member functions in a DLL build. +// EASTL_LOCAL void PrivateMethod(); // Not exported. +// }; +// +// EASTL_API void SomeFunction(); // Export SomeFunction in a DLL build. +// +// +#if defined(EA_DLL) && !defined(EASTL_DLL) + #define EASTL_DLL 1 +#endif + +#ifndef EASTL_API // If the build file hasn't already defined this to be dllexport... + #if EASTL_DLL + #if defined(_MSC_VER) + #define EASTL_API __declspec(dllimport) + #define EASTL_LOCAL + #elif defined(__CYGWIN__) + #define EASTL_API __attribute__((dllimport)) + #define EASTL_LOCAL + #elif (defined(__GNUC__) && (__GNUC__ >= 4)) + #define EASTL_API __attribute__ ((visibility("default"))) + #define EASTL_LOCAL __attribute__ ((visibility("hidden"))) + #else + #define EASTL_API + #define EASTL_LOCAL + #endif + #else + #define EASTL_API + #define EASTL_LOCAL + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_EASTDC_API +// +// This is used for importing EAStdC functions into EASTL, possibly via a DLL import. 
+//
+#ifndef EASTL_EASTDC_API
+    #if EASTL_DLL
+        #if defined(_MSC_VER)
+            #define EASTL_EASTDC_API __declspec(dllimport)
+            #define EASTL_EASTDC_LOCAL
+        #elif defined(__CYGWIN__)
+            #define EASTL_EASTDC_API __attribute__((dllimport))
+            #define EASTL_EASTDC_LOCAL
+        #elif (defined(__GNUC__) && (__GNUC__ >= 4))
+            #define EASTL_EASTDC_API __attribute__ ((visibility("default")))
+            #define EASTL_EASTDC_LOCAL __attribute__ ((visibility("hidden")))
+        #else
+            #define EASTL_EASTDC_API
+            #define EASTL_EASTDC_LOCAL
+        #endif
+    #else
+        #define EASTL_EASTDC_API
+        #define EASTL_EASTDC_LOCAL
+    #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EASTDC_VSNPRINTF
+//
+// Defined as 0 or 1. By default it is 1.
+//
+// When enabled EASTL uses EAStdC's Vsnprintf function directly instead of
+// having the user provide a global Vsnprintf8/16/32 function. The benefit
+// of this is that it will allow EASTL to just link to EAStdC's Vsnprintf
+// without the user doing anything. The downside is that any users who aren't
+// already using EAStdC will either need to now depend on EAStdC or globally
+// define this property to be 0 and simply provide functions that have the same
+// names. See the usage of EASTL_EASTDC_VSNPRINTF in string.h for more info.
+//
+#if !defined(EASTL_EASTDC_VSNPRINTF)
+    #define EASTL_EASTDC_VSNPRINTF 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL
+//
+// Used to wrap debug string names. In a release build, the definition
+// goes away. These are present to avoid release build compiler warnings
+// and to make code simpler.
+//
+// Example usage of EASTL_NAME:
+//     // pName will be defined away in a release build and thus prevent compiler warnings.
+//     void allocator::set_name(const char* EASTL_NAME(pName))
+//     {
+//         #if EASTL_NAME_ENABLED
+//             mpName = pName;
+//         #endif
+//     }
+//
+// Example usage of EASTL_NAME_VAL:
+//     // "xxx" is defined to NULL in a release build.
+//     vector::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx")));
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NAME_ENABLED
+    #define EASTL_NAME_ENABLED EASTL_DEBUG
+#endif
+
+#ifndef EASTL_NAME
+    #if EASTL_NAME_ENABLED
+        #define EASTL_NAME(x)      x
+        #define EASTL_NAME_VAL(x)  x
+    #else
+        #define EASTL_NAME(x)
+        #define EASTL_NAME_VAL(x) ((const char*)NULL)
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_NAME_PREFIX
+//
+// Defined as a string literal. Defaults to "EASTL".
+// This define is used as the default name for EASTL where such a thing is
+// referenced in EASTL. For example, if the user doesn't specify an allocator
+// name for their deque, it is named "EASTL deque". However, you can override
+// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX.
+//
+// Example usage (which is simply taken from how deque.h uses this define):
+//     #ifndef EASTL_DEQUE_DEFAULT_NAME
+//         #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque"
+//     #endif
+//
+#ifndef EASTL_DEFAULT_NAME_PREFIX
+    #define EASTL_DEFAULT_NAME_PREFIX "EASTL"
+#endif
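A sketch of the prefix override described above (the "SuperBaseball" value echoes the comment; where you predefine it is up to your build setup):

    // Predefine before any EASTL header is included, e.g. via the build
    // system or an EASTL_USER_CONFIG_HEADER:
    #define EASTL_DEFAULT_NAME_PREFIX "SuperBaseball"
    // Debug allocator names now default to e.g. "SuperBaseball deque".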
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_DEBUG.
+// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via
+// the assertion mechanism.
+//
+// Example usage:
+//     #if EASTL_ASSERT_ENABLED
+//         EASTL_ASSERT(v.size() > 17);
+//     #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT_ENABLED
+    #define EASTL_ASSERT_ENABLED EASTL_DEBUG
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT_ENABLED
+    #define EASTL_DEV_ASSERT_ENABLED EASTL_DEV_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED.
+// This is like EASTL_ASSERT_ENABLED, except it fires asserts specifically for
+// a container operation that returns a reference while the container is empty.
+// Sometimes people like to be able to take a reference to the front of the
+// container, but won't use it if the container is empty. This may or may not
+// be undefined behaviour depending on the container.
+//
+// In practice, for expressions such as &vector[0] this is not an issue -
+// at least if the subscript operator is inlined because the expression will
+// be equivalent to &*(nullptr) and optimized to nullptr. MSVC, Clang and GCC
+// all have this behaviour and UBSan & ASan report no issues with that code.
+//
+// Code that relies on this macro being disabled should instead use the
+// container's data() member function. The range [data(), data() + size())
+// is always valid, even when the container is empty (in which case data()
+// is not dereferencable).
+//
+// Enabling this macro adds asserts if the container is empty and the function
+// invocation is well defined. If the implementation may invoke UB, or the
+// container is non-empty, then the assert fires if EASTL_ASSERT_ENABLED is
+// enabled, regardless of this macro.
+//
+// NOTE: If this is enabled, EASTL_ASSERT_ENABLED must also be enabled to
+// have any effect.
+//
+// Example usage:
+//     template <typename T, typename Allocator>
+//     inline typename vector<T, Allocator>::reference
+//     vector<T, Allocator>::front()
+//     {
+//         #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//             EASTL_ASSERT(mpEnd > mpBegin);
+//         #endif
+//
+//         return *mpBegin;
+//     }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+    #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SetAssertionFailureFunction
+//
+// Allows the user to set a custom assertion failure mechanism.
+//
+// Example usage:
+//     void Assert(const char* pExpression, void* pContext);
+//     SetAssertionFailureFunction(Assert, this);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERTION_FAILURE_DEFINED
+    #define EASTL_ASSERTION_FAILURE_DEFINED
+
+    namespace eastl
+    {
+        typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext);
+        EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext);
+
+        // These are the internal default functions that implement asserts.
+        EASTL_API void AssertionFailure(const char* pExpression);
+        EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* pContext);
+    }
+#endif
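A minimal sketch of installing a handler through this hook (the handler name and logging are hypothetical; EASTL_DEBUG_BREAK is defined later in this header):

    #include <cstdio>

    static void MyAssertionHandler(const char* pExpression, void* /*pContext*/)
    {
        std::printf("EASTL assertion failed: %s\n", pExpression);
        EASTL_DEBUG_BREAK();
    }

    void InstallEastlAssertHandler()
    {
        // Early in startup:
        eastl::SetAssertionFailureFunction(&MyAssertionHandler, nullptr);
    }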
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT
+//
+// Assertion macro. Can be overridden by user with a different value.
+//
+// Example usage:
+//     EASTL_ASSERT(intVector.size() < 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT
+    #if EASTL_ASSERT_ENABLED
+        #define EASTL_ASSERT(expression) \
+            EA_DISABLE_VC_WARNING(4127) \
+            do { \
+                EA_ANALYSIS_ASSUME(expression); \
+                (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+            } while (0) \
+            EA_RESTORE_VC_WARNING()
+    #else
+        #define EASTL_ASSERT(expression)
+    #endif
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT
+    #if EASTL_DEV_ASSERT_ENABLED
+        #define EASTL_DEV_ASSERT(expression) \
+            EA_DISABLE_VC_WARNING(4127) \
+            do { \
+                EA_ANALYSIS_ASSUME(expression); \
+                (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+            } while(0) \
+            EA_RESTORE_VC_WARNING()
+    #else
+        #define EASTL_DEV_ASSERT(expression)
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_MSG
+//
+// Example usage:
+//     EASTL_ASSERT_MSG(false, "detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_ASSERT_MSG
+    #if EASTL_ASSERT_ENABLED
+        #define EASTL_ASSERT_MSG(expression, message) \
+            EA_DISABLE_VC_WARNING(4127) \
+            do { \
+                EA_ANALYSIS_ASSUME(expression); \
+                (void)((expression) || (eastl::AssertionFailure(message), 0)); \
+            } while (0) \
+            EA_RESTORE_VC_WARNING()
+    #else
+        #define EASTL_ASSERT_MSG(expression, message)
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FAIL_MSG
+//
+// Failure macro. Can be overridden by user with a different value.
+//
+// Example usage:
+//     EASTL_FAIL_MSG("detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FAIL_MSG
+    #if EASTL_ASSERT_ENABLED
+        #define EASTL_FAIL_MSG(message) (eastl::AssertionFailure(message))
+    #else
+        #define EASTL_FAIL_MSG(message)
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED
+//
+// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+//
+// Example usage:
+//     EASTL_CT_ASSERT(sizeof(uint32_t) == 4);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT(expression) static_assert(expression, #expression)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT_MSG
+//
+// EASTL_CT_ASSERT_MSG is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+// The message must be a string literal.
+//
+// Example usage:
+//     EASTL_CT_ASSERT_MSG(sizeof(uint32_t) == 4, "The size of uint32_t must be 4.");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT_MSG(expression, message) static_assert(expression, message)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG_BREAK / EASTL_DEBUG_BREAK_OVERRIDE
+//
+// This function causes an app to immediately stop under the debugger.
+// It is implemented as a macro in order to allow stopping at the site
+// of the call.
+//
+// EASTL_DEBUG_BREAK_OVERRIDE allows one to define EASTL_DEBUG_BREAK directly.
+// This is useful in cases where you desire to disable EASTL_DEBUG_BREAK
+// but do not wish to (or cannot) define a custom void function() to replace
+// EASTL_DEBUG_BREAK callsites.
+//
+// Example usage:
+//     EASTL_DEBUG_BREAK();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG_BREAK_OVERRIDE
+    #ifndef EASTL_DEBUG_BREAK
+        #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+            #define EASTL_DEBUG_BREAK() __debugbreak() // This is a compiler intrinsic which will map to appropriate inlined asm for the platform.
+        #elif defined(EA_PLATFORM_NINTENDO)
+            #define EASTL_DEBUG_BREAK() __builtin_debugtrap() // Consider using the CLANG define
+        #elif (defined(EA_PROCESSOR_ARM) && !defined(EA_PROCESSOR_ARM64)) && defined(__APPLE__)
+            #define EASTL_DEBUG_BREAK() asm("trap")
+        #elif defined(EA_PROCESSOR_ARM64) && defined(__APPLE__)
+            #include <signal.h>
+            #include <unistd.h>
+            #define EASTL_DEBUG_BREAK() kill( getpid(), SIGINT )
+        #elif defined(EA_PROCESSOR_ARM64) && defined(__GNUC__)
+            #define EASTL_DEBUG_BREAK() asm("brk 10")
+        #elif defined(EA_PROCESSOR_ARM) && defined(__GNUC__)
+            #define EASTL_DEBUG_BREAK() asm("BKPT 10") // The 10 is arbitrary. It's just a unique id.
+        #elif defined(EA_PROCESSOR_ARM) && defined(__ARMCC_VERSION)
+            #define EASTL_DEBUG_BREAK() __breakpoint(10)
+        #elif defined(EA_PROCESSOR_POWERPC) // Generic PowerPC.
+            #define EASTL_DEBUG_BREAK() asm(".long 0") // This triggers an exception by executing opcode 0x00000000.
+        #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && defined(EA_ASM_STYLE_INTEL)
+            #define EASTL_DEBUG_BREAK() { __asm int 3 }
+        #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && (defined(EA_ASM_STYLE_ATT) || defined(__GNUC__))
+            #define EASTL_DEBUG_BREAK() asm("int3")
+        #else
+            void EASTL_DEBUG_BREAK(); // User must define this externally.
+        #endif
+    #else
+        void EASTL_DEBUG_BREAK(); // User must define this externally.
+    #endif
+#else
+    #ifndef EASTL_DEBUG_BREAK
+        #if EASTL_DEBUG_BREAK_OVERRIDE == 1
+            // define an empty callable to satisfy the call site.
+            #define EASTL_DEBUG_BREAK ([]{})
+        #else
+            #define EASTL_DEBUG_BREAK EASTL_DEBUG_BREAK_OVERRIDE
+        #endif
+    #else
+        #error EASTL_DEBUG_BREAK is already defined yet you would like to override it. Please ensure no other headers are already defining EASTL_DEBUG_BREAK before this header (config.h) is included
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CRASH
+//
+// Executes an invalid memory write, which should result in an exception
+// on most platforms.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CRASH() *((volatile int*)0) = 0xDEADC0DE;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 0 (disabled) until some future date.
+// If enabled (1) then container operator= copies the allocator from the
+// source container. It ideally should be set to enabled but for backwards
+// compatibility with older versions of EASTL it is currently set to 0.
+// Regardless of whether this value is 0 or 1, containers copy construct
+// or copy assign allocators.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_COPY_ENABLED
+    #define EASTL_ALLOCATOR_COPY_ENABLED 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FIXED_SIZE_TRACKING_ENABLED
+//
+// Defined as an integer >= 0. Default is same as EASTL_DEBUG.
+// If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed
+// containers in debug builds track the max count of objects
+// that have been in the container. This allows for the tuning
+// of fixed container sizes to their minimum required size.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED
+    #define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RTTI_ENABLED
+//
+// Defined as 0 or 1. Default is 1 if RTTI is supported by the compiler.
+// This define exists so that we can use some dynamic_cast operations in the
+// code without warning. dynamic_cast is only used if the user specifically
+// refers to it; EASTL won't do dynamic_cast behind your back.
+//
+// Example usage:
+//     #if EASTL_RTTI_ENABLED
+//         pChildClass = dynamic_cast<ChildClass*>(pParentClass);
+//     #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RTTI_ENABLED
+    // The VC++ default Standard Library (Dinkumware) disables major parts of RTTI
+    // (e.g. type_info) if exceptions are disabled, even if RTTI itself is enabled.
+    // _HAS_EXCEPTIONS is defined by Dinkumware to 0 or 1 (disabled or enabled).
+    #if defined(EA_COMPILER_NO_RTTI) || (defined(_MSC_VER) && defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !(defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS))
+        #define EASTL_RTTI_ENABLED 0
+    #else
+        #define EASTL_RTTI_ENABLED 1
+    #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EXCEPTIONS_ENABLED
+//
+// Defined as 0 or 1. Default is to follow what the compiler settings are.
+// The user can predefine EASTL_EXCEPTIONS_ENABLED to 0 or 1; however, if the
+// compiler is set to disable exceptions then EASTL_EXCEPTIONS_ENABLED is
+// forced to a value of 0 regardless of the user predefine.
+//
+// Note that we do not enable EASTL exceptions by default if the compiler
+// has exceptions enabled. To enable EASTL_EXCEPTIONS_ENABLED you need to
+// manually set it to 1.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_EXCEPTIONS_ENABLED) || ((EASTL_EXCEPTIONS_ENABLED == 1) && defined(EA_COMPILER_NO_EXCEPTIONS))
+    #define EASTL_EXCEPTIONS_ENABLED 0
+#endif
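A sketch of what the toggle means at a call site (assuming the standard-like, range-checked behavior of eastl::vector::at):

    #include <EASTL/vector.h>

    void Probe(eastl::vector<int>& v)
    {
    #if EASTL_EXCEPTIONS_ENABLED
        try { v.at(1000); }       // out-of-range access throws
        catch (...) { /* recover */ }
    #else
        if (1000 < v.size())      // with exceptions off, the caller validates
        { /* use v[1000] */ }
    #endif
    }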
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_OPT_XXXX
+//
+// Enables some options / optimizations that cause the string class
+// to behave slightly different from the C++ standard basic_string. These are
+// options whereby you can improve performance by avoiding operations that
+// in practice may never occur for you.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS
+    // Defined as 0 or 1. Default is 0.
+    // Defines if we should implement explicitness in constructors where the C++
+    // standard string does not. The advantage of leaving constructors implicit
+    // (the default) is that you can write: string s = "hello"; in addition to
+    // string s("hello"); The disadvantage is that silent conversions can be
+    // done which impede performance if the user isn't paying attention.
+    // C++ standard string ctors are not explicit.
+    #define EASTL_STRING_OPT_EXPLICIT_CTORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_LENGTH_ERRORS
+    // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+    // Defines if we check for string values going beyond kMaxSize
+    // (a very large value) and throw exceptions if so.
+    // C++ standard strings are expected to do such checks.
+    #define EASTL_STRING_OPT_LENGTH_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_RANGE_ERRORS
+    // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+    // Defines if we check for out-of-bounds references to string
+    // positions and throw exceptions if so. Well-behaved code shouldn't
+    // reference out-of-bounds positions and so shouldn't need these checks.
+    // C++ standard strings are expected to do such range checks.
+    #define EASTL_STRING_OPT_RANGE_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS
+    // Defined as 0 or 1. Default is 0.
+    // Defines if we check for NULL ptr arguments passed to string
+    // functions by the user and throw exceptions if so. Well-behaved code
+    // shouldn't pass bad arguments and so shouldn't need these checks.
+    // Also, some users believe that strings should check for NULL pointers
+    // in all their arguments and do no-ops if so. This is very debatable.
+    // C++ standard strings are not required to check for such argument errors.
+    #define EASTL_STRING_OPT_ARGUMENT_ERRORS 0
+#endif
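Illustrating the EASTL_STRING_OPT_EXPLICIT_CTORS trade-off (a sketch):

    #include <EASTL/string.h>

    eastl::string a("hello");   // compiles either way
    eastl::string b = "hello";  // implicit conversion: only compiles while
                                // EASTL_STRING_OPT_EXPLICIT_CTORS == 0 (the default)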
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_SIZE_T
+//
+// Defined as 0 or 1. Default is 1.
+// Controls whether bitset uses size_t or eastl_size_t.
+//
+#ifndef EASTL_BITSET_SIZE_T
+    #define EASTL_BITSET_SIZE_T 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_SUPPORTED
+//
+// Defined as 0 or 1.
+//
+#ifndef EASTL_INT128_SUPPORTED
+    #if defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16)
+        #define EASTL_INT128_SUPPORTED 1
+    #else
+        #define EASTL_INT128_SUPPORTED 0
+    #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_GCC_STYLE_INT128_SUPPORTED
+//
+// Defined as 0 or 1.
+// Specifies whether __int128_t/__uint128_t are defined.
+//
+#ifndef EASTL_GCC_STYLE_INT128_SUPPORTED
+#if EASTL_INT128_SUPPORTED && (defined(EA_COMPILER_GNUC) || defined(__clang__))
+#define EASTL_GCC_STYLE_INT128_SUPPORTED 1
+#else
+#define EASTL_GCC_STYLE_INT128_SUPPORTED 0
+#endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_DEFINED
+//
+// Defined as 0 or 1.
+// Specifies whether eastl_int128_t/eastl_uint128_t have been typedef'd yet.
+// NB: these types are not considered fundamental, arithmetic or integral when using the EAStdC implementation.
+// This changes the compiler type traits defined in type_traits.h,
+// e.g. is_signed<eastl_int128_t>::value may be false, because it is not arithmetic.
+//
+#ifndef EASTL_INT128_DEFINED
+    #if EASTL_INT128_SUPPORTED
+        #define EASTL_INT128_DEFINED 1
+
+        #if EASTL_GCC_STYLE_INT128_SUPPORTED
+            typedef __int128_t  eastl_int128_t;
+            typedef __uint128_t eastl_uint128_t;
+        #else
+            typedef int128_t  eastl_int128_t;  // The EAStdC package defines an EA::StdC::int128_t and uint128_t type,
+            typedef uint128_t eastl_uint128_t; // though they are currently within the EA::StdC namespace.
+        #endif
+    #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_WORD_TYPE_DEFAULT / EASTL_BITSET_WORD_SIZE_DEFAULT
+//
+// Defined as an integral power of two type, usually uint32_t or uint64_t.
+// Specifies the word type that bitset should use internally to implement
+// storage. By default this is the platform register word size, but there
+// may be reasons to use a different value.
+//
+// Defines the integral data type used by bitset by default.
+// You can override this default on a bitset-by-bitset case by supplying a
+// custom bitset WordType template parameter.
+//
+// The C++ standard specifies that the std::bitset word type be unsigned long,
+// but that isn't necessarily the most efficient data type for the given platform.
+// We can follow the standard and be potentially less efficient or we can do what
+// is more efficient but less like the C++ std::bitset.
+//
+#if !defined(EASTL_BITSET_WORD_TYPE_DEFAULT)
+    #if defined(EASTL_BITSET_WORD_SIZE) // EASTL_BITSET_WORD_SIZE is deprecated, but we temporarily support the ability for the user to specify it. Use EASTL_BITSET_WORD_TYPE_DEFAULT instead.
+        #if (EASTL_BITSET_WORD_SIZE == 4)
+            #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+            #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+        #else
+            #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+            #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+        #endif
+    #elif (EA_PLATFORM_WORD_SIZE == 16) // EA_PLATFORM_WORD_SIZE is defined in EABase.
+        #define EASTL_BITSET_WORD_TYPE_DEFAULT uint128_t
+        #define EASTL_BITSET_WORD_SIZE_DEFAULT 16
+    #elif (EA_PLATFORM_WORD_SIZE == 8)
+        #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+        #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+    #elif (EA_PLATFORM_WORD_SIZE == 4)
+        #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+        #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+    #else
+        #define EASTL_BITSET_WORD_TYPE_DEFAULT uint16_t
+        #define EASTL_BITSET_WORD_SIZE_DEFAULT 2
+    #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_SIZE_CACHE
+//
+// Defined as 0 or 1. Default is 1. Changed from 0 in version 1.16.01.
+// If defined as 1, the list and slist containers (and possibly any additional
+// containers as well) keep a member mSize (or similar) variable which allows
+// the size() member function to execute in constant time (a.k.a. O(1)).
+// There are debates on both sides as to whether it is better to have this +// cached value or not, as having it entails some cost (memory and code). +// To consider: Make list size caching an optional template parameter. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_LIST_SIZE_CACHE + #define EASTL_LIST_SIZE_CACHE 1 +#endif + +#ifndef EASTL_SLIST_SIZE_CACHE + #define EASTL_SLIST_SIZE_CACHE 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MAX_STACK_USAGE +// +// Defined as an integer greater than zero. Default is 4000. +// There are some places in EASTL where temporary objects are put on the +// stack. A common example of this is in the implementation of container +// swap functions whereby a temporary copy of the container is made. +// There is a problem, however, if the size of the item created on the stack +// is very large. This can happen with fixed-size containers, for example. +// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory +// (in bytes) that the given platform/compiler will safely allow on the stack. +// Platforms such as Windows will generally allow larger values than embedded +// systems or console machines, but it is usually a good idea to stick with +// a max usage value that is portable across all platforms, lest the user be +// surprised when something breaks as it is ported to another platform. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_MAX_STACK_USAGE + #define EASTL_MAX_STACK_USAGE 4000 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VA_COPY_ENABLED +// +// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others. +// Some compilers on some platforms implement va_list whereby its contents +// are destroyed upon usage, even if passed by value to another function. +// With these compilers you can use va_copy to save and restore a va_list. +// Known compiler/platforms that destroy va_list contents upon usage include: +// CodeWarrior on PowerPC +// GCC on x86-64 +// However, va_copy is part of the C99 standard and not part of earlier C and +// C++ standards. So not all compilers support it. VC++ doesn't support va_copy, +// but it turns out that VC++ doesn't usually need it on the platforms it supports, +// and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++. +// +// Example usage: +// void Function(va_list arguments) +// { +// #if EASTL_VA_COPY_ENABLED +// va_list argumentsCopy; +// va_copy(argumentsCopy, arguments); +// #endif +// +// #if EASTL_VA_COPY_ENABLED +// va_end(argumentsCopy); +// #endif +// } +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VA_COPY_ENABLED + #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__) + #define EASTL_VA_COPY_ENABLED 1 + #else + #define EASTL_VA_COPY_ENABLED 0 + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_OPERATOR_EQUALS_OTHER_ENABLED +// +// Defined as 0 or 1. Default is 0 until such day that it's deemed safe. +// When enabled, enables operator= for other char types, e.g. 
for code
+// like this:
+//     eastl::string8  s8;
+//     eastl::string16 s16;
+//     s8 = s16;
+// This option is considered experimental, and may exist as such for an
+// indefinite amount of time.
+//
+#if !defined(EASTL_OPERATOR_EQUALS_OTHER_ENABLED)
+    #define EASTL_OPERATOR_EQUALS_OTHER_ENABLED 0
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_ITERATOR_CATEGORY_ENABLED
+//
+// Defined as 0 or 1. Default is 0.
+// If defined as non-zero, EASTL iterator categories (iterator.h's input_iterator_tag,
+// forward_iterator_tag, etc.) are defined to be those from std C++ in the std
+// namespace. The reason for wanting to enable such a feature is that it allows
+// EASTL containers and algorithms to work with std STL containers and algorithms.
+// The default value was changed from 1 to 0 in EASTL 1.13.03, January 11, 2012.
+// The reason for the change was that almost nobody was taking advantage of it and
+// it was slowing down compile times for some compilers quite a bit due to them
+// having a lot of headers behind <iterator>.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_ITERATOR_CATEGORY_ENABLED
+    #define EASTL_STD_ITERATOR_CATEGORY_ENABLED 0
+#endif
+
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+    #define EASTL_ITC_NS std
+#else
+    #define EASTL_ITC_NS eastl
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATION_ENABLED
+//
+// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG.
+// If nonzero, then a certain amount of automatic runtime validation is done.
+// Runtime validation is not considered the same thing as asserting that user
+// input values are valid. Validation refers to internal consistency checking
+// of the validity of containers and their iterators. Validation checking is
+// something that often involves significantly more than basic assertion
+// checking, and it may sometimes be desirable to disable it.
+//
+// Validation sub-features are supported and can be enabled / disabled
+// individually.
+//
+// This macro would generally be used internally by EASTL.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATION_ENABLED
+    #define EASTL_VALIDATION_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_COMPARE
+//
+// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled.
+// This is used to validate user-supplied comparison functions, particularly for sorting purposes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_COMPARE_ENABLED
+    #define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED
+#endif
+
+#if EASTL_VALIDATE_COMPARE_ENABLED
+    #define EASTL_VALIDATE_COMPARE EASTL_ASSERT
+#else
+    #define EASTL_VALIDATE_COMPARE(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_INTRUSIVE_LIST
+//
+// Defined as an integral value >= 0. Controls the amount of automatic validation
+// done by intrusive_list. A value of 0 means no automatic validation is done.
+// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes
+// the intrusive_list_node become a non-POD, which may be an issue for some code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_INTRUSIVE_LIST
+    #define EASTL_VALIDATE_INTRUSIVE_LIST 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FORCE_INLINE
+//
+// Defined as a "force inline" expression or defined away.
+// You generally don't need to use forced inlining with the Microsoft and
+// Metrowerks compilers, but you may need it with the GCC compiler (any version).
+//
+// Example usage:
+//     template <typename T, typename Allocator>
+//     EASTL_FORCE_INLINE typename vector<T, Allocator>::size_type
+//     vector<T, Allocator>::size() const
+//        { return mpEnd - mpBegin; }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FORCE_INLINE
+    #define EASTL_FORCE_INLINE EA_FORCE_INLINE
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAY_ALIAS
+//
+// Defined as a macro that wraps the GCC may_alias attribute. This attribute
+// has no significance for VC++ because VC++ doesn't support the concept of
+// strict aliasing. Users should avoid writing code that breaks strict
+// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative.
+//
+// Example usage:
+//    uint32_t value EASTL_MAY_ALIAS;
+//
+// Example usage:
+//    typedef uint32_t EASTL_MAY_ALIAS value_type;
+//    value_type value;
+//
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) && !defined(EA_COMPILER_RVCT)
+    #define EASTL_MAY_ALIAS __attribute__((__may_alias__))
+#else
+    #define EASTL_MAY_ALIAS
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIKELY / EASTL_UNLIKELY
+//
+// Defined as a macro which gives a hint to the compiler for branch
+// prediction. GCC gives you the ability to manually give a hint to
+// the compiler about the result of a comparison, though it's often
+// best to compile shipping code with profiling feedback under both
+// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+// are times when you feel very sure that a boolean expression will
+// usually evaluate to either true or false and can help the compiler
+// by using an explicit directive...
+//
+// Example usage:
+//    if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+//       { ... }
+//
+// Example usage:
+//    if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+//       { ... }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIKELY
+    #if defined(__GNUC__) && (__GNUC__ >= 3)
+        #define EASTL_LIKELY(x)   __builtin_expect(!!(x), true)
+        #define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false)
+    #else
+        #define EASTL_LIKELY(x)   (x)
+        #define EASTL_UNLIKELY(x) (x)
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+//
+// Defined as 0 or 1; default is based on auto-detection.
+// Specifies whether the compiler provides built-in compiler type trait support
+// (e.g. __is_abstract()). Does not specify any details about which traits
+// are available or what their standards-compliance is. Nevertheless this is a
+// useful macro identifier for our type traits implementation.
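+// For example, when this is 1, the type_traits implementation can map a trait
+// such as eastl::is_abstract directly onto the compiler's __is_abstract()
+// intrinsic rather than emulating it in the library (an illustrative note,
+// not a guarantee about any particular trait).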
+//
+#ifndef EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+    #if defined(_MSC_VER) && (_MSC_VER >= 1500) && !defined(EA_COMPILER_CLANG_CL) // VS2008 or later
+        #pragma warning(push, 0)
+            #include <yvals.h>
+        #pragma warning(pop)
+        #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support.
+            #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+        #else
+            #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+        #endif
+    #elif defined(__clang__) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++.
+        #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+    #elif defined(__clang__)
+        #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+    #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)
+        #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+    #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler.
+        #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+    #else
+        #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+    #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MINMAX_ENABLED
+//
+// Defined as 0 or 1; default is 1.
+// Specifies whether the min and max algorithms are available.
+// It may be useful to disable the min and max algorithms because sometimes
+// #defines for min and max exist which would collide with EASTL min and max.
+// Note that there are already alternative versions of min and max in EASTL
+// with the min_alt and max_alt functions. You can use these without colliding
+// with min/max macros that may exist.
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_MINMAX_ENABLED
+    #define EASTL_MINMAX_ENABLED 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NOMINMAX
+//
+// Defined as 0 or 1; default is 1.
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and
+// max if they are #defined by an external library. This allows our min and
+// max definitions in algorithm.h to work as expected. An alternative to
+// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project
+// settings if you are compiling for Windows.
+// Note that this does not control the availability of the EASTL min and max
+// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NOMINMAX
+    #define EASTL_NOMINMAX 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_CPP_ONLY
+//
+// Defined as 0 or 1; default is 0.
+// Disables the use of compiler language extensions. We use compiler language
+// extensions only in the case that they provide some benefit that can't be
+// had any other practical way. But sometimes the compiler is set to disable
+// language extensions or sometimes one compiler's preprocessor is used to generate
+// code for another compiler, and so it's necessary to disable language extension usage.
+//
+// Example usage:
+//     #if defined(_MSC_VER) && !EASTL_STD_CPP_ONLY
+//         enum : size_type { npos = container_type::npos }; // Microsoft extension which results in significantly smaller debug symbols.
+// #else +// static const size_type npos = container_type::npos; +// #endif +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_STD_CPP_ONLY + #define EASTL_STD_CPP_ONLY 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NO_RVALUE_REFERENCES +// +// Defined as 0 or 1. +// This is the same as EABase EA_COMPILER_NO_RVALUE_REFERENCES except that it +// follows the convention of being always defined, as 0 or 1. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_NO_RVALUE_REFERENCES) + #if defined(EA_COMPILER_NO_RVALUE_REFERENCES) + #define EASTL_NO_RVALUE_REFERENCES 1 + #else + #define EASTL_NO_RVALUE_REFERENCES 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MOVE_SEMANTICS_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with rvalue references and move +// operations is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_MOVE_SEMANTICS_ENABLED) + #if EASTL_NO_RVALUE_REFERENCES // If the compiler doesn't support rvalue references or EASTL is configured to disable them... + #define EASTL_MOVE_SEMANTICS_ENABLED 0 + #else + #define EASTL_MOVE_SEMANTICS_ENABLED 1 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VARIADIC_TEMPLATES_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with variadic templates is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_VARIADIC_TEMPLATES_ENABLED) + #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) // If the compiler doesn't support variadic templates + #define EASTL_VARIADIC_TEMPLATES_ENABLED 0 + #else + #define EASTL_VARIADIC_TEMPLATES_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VARIABLE_TEMPLATES_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with variable templates is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_VARIABLE_TEMPLATES_ENABLED) + #if((EABASE_VERSION_N < 20605) || defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)) + #define EASTL_VARIABLE_TEMPLATES_ENABLED 0 + #else + #define EASTL_VARIABLE_TEMPLATES_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_INLINE_VARIABLE_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++17-like functionality with inline variable is enabled. 
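+// (A C++17 inline variable may be defined identically in multiple translation
+// units, which is what lets a header-only constant avoid ODR problems.)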
+/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_INLINE_VARIABLE_ENABLED) + #if((EABASE_VERSION_N < 20707) || defined(EA_COMPILER_NO_INLINE_VARIABLES)) + #define EASTL_INLINE_VARIABLE_ENABLED 0 + #else + #define EASTL_INLINE_VARIABLE_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_CPP17_INLINE_VARIABLE +// +// Used to prefix a variable as inline when C++17 inline variables are available +// Usage: EASTL_CPP17_INLINE_VARIABLE constexpr bool type_trait_v = type_trait::value +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_CPP17_INLINE_VARIABLE) + #if EASTL_INLINE_VARIABLE_ENABLED + #define EASTL_CPP17_INLINE_VARIABLE inline + #else + #define EASTL_CPP17_INLINE_VARIABLE + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS undef +// +// We need revise this macro to be undefined in some cases, in case the user +// isn't using an updated EABase. +/////////////////////////////////////////////////////////////////////////////// +#if defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // It may in fact be supported by 4.01 or 4.02 but we don't have compilers to test with. + #if defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) + #undef EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NO_RANGE_BASED_FOR_LOOP +// +// Defined as 0 or 1. +// This is the same as EABase EA_COMPILER_NO_RANGE_BASED_FOR_LOOP except that it +// follows the convention of being always defined, as 0 or 1. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_NO_RANGE_BASED_FOR_LOOP) + #if defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP) + #define EASTL_NO_RANGE_BASED_FOR_LOOP 1 + #else + #define EASTL_NO_RANGE_BASED_FOR_LOOP 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALIGN_OF +// +// Determines the alignment of a type. +// +// Example usage: +// size_t alignment = EASTL_ALIGN_OF(int); +// +/////////////////////////////////////////////////////////////////////////////// +#ifndef EASTL_ALIGN_OF + #define EASTL_ALIGN_OF alignof +#endif + + + + +/////////////////////////////////////////////////////////////////////////////// +// eastl_size_t +// +// Defined as an unsigned integer type, usually either size_t or uint32_t. +// Defaults to size_t to match std STL unless the user specifies to use +// uint32_t explicitly via the EASTL_SIZE_T_32BIT define +// +// Example usage: +// eastl_size_t n = intVector.size(); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_SIZE_T_32BIT // Defines whether EASTL_SIZE_T uses uint32_t/int32_t as opposed to size_t/ssize_t. + #define EASTL_SIZE_T_32BIT 0 // This makes a difference on 64 bit platforms because they use a 64 bit size_t. +#endif // By default we do the same thing as std STL and use size_t. 
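+// Illustrative usage of the format-specifier macros defined below (an editorial
+// sketch, not upstream EASTL documentation): because eastl_size_t may be either
+// size_t or uint32_t, printf code should spell the conversion via the macros:
+//     eastl_size_t n = intVector.size();
+//     printf("size = %" EASTL_PRIuSIZE "\n", n);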
+ +#ifndef EASTL_SIZE_T + #if (EASTL_SIZE_T_32BIT == 0) || (EA_PLATFORM_WORD_SIZE == 4) + #include + #define EASTL_SIZE_T size_t + #define EASTL_SSIZE_T intptr_t + + // printf format specifiers for use with eastl_size_t + #define EASTL_PRIdSIZE "zd" + #define EASTL_PRIiSIZE "zi" + #define EASTL_PRIoSIZE "zo" + #define EASTL_PRIuSIZE "zu" + #define EASTL_PRIxSIZE "zx" + #define EASTL_PRIXSIZE "zX" + #else + #define EASTL_SIZE_T uint32_t + #define EASTL_SSIZE_T int32_t + + // printf format specifiers for use with eastl_size_t + #define EASTL_PRIdSIZE PRId32 + #define EASTL_PRIiSIZE PRIi32 + #define EASTL_PRIoSIZE PRIo32 + #define EASTL_PRIuSIZE PRIu32 + #define EASTL_PRIxSIZE PRIx32 + #define EASTL_PRIXSIZE PRIX32 + #endif +#endif + +typedef EASTL_SIZE_T eastl_size_t; // Same concept as std::size_t. +typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept is similar to Posix's ssize_t. + + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALLOCATOR_EXPLICIT_ENABLED +// +// Defined as 0 or 1. Default is 0 for now but ideally would be changed to +// 1 some day. It's 0 because setting it to 1 breaks some existing code. +// This option enables the allocator ctor to be explicit, which avoids +// some undesirable silent conversions, especially with the string class. +// +// Example usage: +// class allocator +// { +// public: +// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName); +// }; +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED + #define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0 +#endif + +#if EASTL_ALLOCATOR_EXPLICIT_ENABLED + #define EASTL_ALLOCATOR_EXPLICIT explicit +#else + #define EASTL_ALLOCATOR_EXPLICIT +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALLOCATOR_MIN_ALIGNMENT +// +// Defined as an integral power-of-2 that's >= 1. +// Identifies the minimum alignment that EASTL should assume its allocators +// use. There is code within EASTL that decides whether to do a Malloc or +// MallocAligned call and it's typically better if it can use the Malloc call. +// But this requires knowing what the minimum possible alignment is. +#if !defined(EASTL_ALLOCATOR_MIN_ALIGNMENT) + #define EASTL_ALLOCATOR_MIN_ALIGNMENT EA_PLATFORM_MIN_MALLOC_ALIGNMENT +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT +// +// Identifies the minimum alignment that EASTL should assume system allocations +// from malloc and new will have. +#if !defined(EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT) + #if defined(EA_PLATFORM_MICROSOFT) || defined(EA_PLATFORM_APPLE) + #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT 16 + #else + #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2) + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL allocator +// +// The EASTL allocator system allows you to redefine how memory is allocated +// via some defines that are set up here. In the container code, memory is +// allocated via macros which expand to whatever the user has them set to +// expand to. Given that there are multiple allocator systems available, +// this system allows you to configure it to use whatever system you want, +// provided your system meets the requirements of this library. 
+// The requirements are: +// +// - Must be constructable via a const char* (name) parameter. +// Some uses of allocators won't require this, however. +// - Allocate a block of memory of size n and debug name string. +// - Allocate a block of memory of size n, debug name string, +// alignment a, and offset o. +// - Free memory allocated via either of the allocation functions above. +// - Provide a default allocator instance which can be used if the user +// doesn't provide a specific one. +// +/////////////////////////////////////////////////////////////////////////////// + +// namespace eastl +// { +// class allocator +// { +// allocator(const char* pName = NULL); +// +// void* allocate(size_t n, int flags = 0); +// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); +// void deallocate(void* p, size_t n); +// +// const char* get_name() const; +// void set_name(const char* pName); +// }; +// +// allocator* GetDefaultAllocator(); // This is used for anonymous allocations. +// } + +#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does. + #define EASTLAlloc(allocator, n) (allocator).allocate(n); +#endif + +#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does. + #define EASTLAllocFlags(allocator, n, flags) (allocator).allocate(n, flags); +#endif + +#ifndef EASTLAllocAligned + #define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), (alignment), (offset)) +#endif + +#ifndef EASTLAllocAlignedFlags + #define EASTLAllocAlignedFlags(allocator, n, alignment, offset, flags) (allocator).allocate((n), (alignment), (offset), (flags)) +#endif + +#ifndef EASTLFree + #define EASTLFree(allocator, p, size) (allocator).deallocate((void*)(p), (size)) // Important to cast to void* as p may be non-const. +#endif + +#ifndef EASTLAllocatorType + #define EASTLAllocatorType eastl::allocator +#endif + +#ifndef EASTLDummyAllocatorType + #define EASTLDummyAllocatorType eastl::dummy_allocator +#endif + +#ifndef EASTLAllocatorDefault + // EASTLAllocatorDefault returns the default allocator instance. This is not a global + // allocator which implements all container allocations but is the allocator that is + // used when EASTL needs to allocate memory internally. There are very few cases where + // EASTL allocates memory internally, and in each of these it is for a sensible reason + // that is documented to behave as such. + #define EASTLAllocatorDefault eastl::GetDefaultAllocator +#endif + + +/// EASTL_ALLOCATOR_DEFAULT_NAME +/// +/// Defines a default allocator name in the absence of a user-provided name. +/// +#ifndef EASTL_ALLOCATOR_DEFAULT_NAME + #define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL". +#endif + +/// EASTL_USE_FORWARD_WORKAROUND +/// +/// This is to workaround a compiler bug that we found in VS2013. Update 1 did not fix it. 
+/// This should be fixed in a future release of VS2013 http://accentuable4.rssing.com/browser.php?indx=3511740&item=15696
+///
+#ifndef EASTL_USE_FORWARD_WORKAROUND
+    #if defined(_MSC_FULL_VER) && _MSC_FULL_VER == 180021005 || (defined(__EDG_VERSION__) && (__EDG_VERSION__ < 405)) // VS2013 initial release
+        #define EASTL_USE_FORWARD_WORKAROUND 1
+    #else
+        #define EASTL_USE_FORWARD_WORKAROUND 0
+    #endif
+#endif
+
+
+/// EASTL_TUPLE_ENABLED
+/// EASTL tuple implementation depends on variadic template support
+#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+    #define EASTL_TUPLE_ENABLED 1
+#else
+    #define EASTL_TUPLE_ENABLED 0
+#endif
+
+
+/// EASTL_USER_LITERALS_ENABLED
+#ifndef EASTL_USER_LITERALS_ENABLED
+    #if defined(EA_COMPILER_CPP14_ENABLED)
+        #define EASTL_USER_LITERALS_ENABLED 1
+    #else
+        #define EASTL_USER_LITERALS_ENABLED 0
+    #endif
+#endif
+
+
+/// EASTL_INLINE_NAMESPACES_ENABLED
+#ifndef EASTL_INLINE_NAMESPACES_ENABLED
+    #if defined(EA_COMPILER_CPP14_ENABLED)
+        #define EASTL_INLINE_NAMESPACES_ENABLED 1
+    #else
+        #define EASTL_INLINE_NAMESPACES_ENABLED 0
+    #endif
+#endif
+
+
+/// EASTL_CORE_ALLOCATOR_ENABLED
+#ifndef EASTL_CORE_ALLOCATOR_ENABLED
+    #define EASTL_CORE_ALLOCATOR_ENABLED 0
+#endif
+
+/// EASTL_OPENSOURCE
+/// This is enabled when EASTL is built in an "open source" mode, a mode that
+/// eliminates code dependencies on other technologies that have not been
+/// released publicly.
+/// EASTL_OPENSOURCE = 0, is the default.
+/// EASTL_OPENSOURCE = 1, utilizes technologies that are not publicly available.
+///
+#ifndef EASTL_OPENSOURCE
+    #define EASTL_OPENSOURCE 0
+#endif
+
+
+/// EASTL_OPTIONAL_ENABLED
+#if defined(EA_COMPILER_MSVC_2012)
+    #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2013)
+    #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2015)
+    #define EASTL_OPTIONAL_ENABLED 1
+#elif EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) && !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && defined(EA_COMPILER_CPP11_ENABLED)
+    #define EASTL_OPTIONAL_ENABLED 1
+#else
+    #define EASTL_OPTIONAL_ENABLED 0
+#endif
+
+/// EASTL_HAS_INTRINSIC(x)
+/// does the compiler intrinsic (MSVC terminology) or builtin (Clang / GCC terminology) exist?
+/// where `x` does not include the leading "__" +#if defined(EA_COMPILER_CLANG) + // see https://clang.llvm.org/docs/LanguageExtensions.html#type-trait-primitives + #if EA_COMPILER_VERSION >= 1000 + #define EASTL_HAS_INTRINSIC(x) EA_COMPILER_HAS_BUILTIN(__ ## x) + #elif EA_COMPILER_VERSION >= 600 + // NB: !__is_identifier() is correct: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66970#c11 + #define EASTL_HAS_INTRINSIC(x) !__is_identifier(__ ## x) + #else + // note: only works for a subset of builtins + #define EASTL_HAS_INTRINSIC(x) EA_COMPILER_HAS_FEATURE(x) + #endif +#else +#define EASTL_HAS_INTRINSIC(x) EA_COMPILER_HAS_BUILTIN(__ ## x) +#endif + +/// EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE +#if EASTL_HAS_INTRINSIC(has_unique_object_representations) || (defined(_MSC_VER) && (_MSC_VER >= 1913)) // VS2017 15.6+ + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1 +#else + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0 +#endif + +#if EASTL_HAS_INTRINSIC(is_final) || defined(EA_COMPILER_GNUC) || (defined(_MSC_VER) && (_MSC_VER >= 1914)) // VS2017 15.7+ + #define EASTL_IS_FINAL_AVAILABLE 1 +#else + #define EASTL_IS_FINAL_AVAILABLE 0 +#endif + +#if EASTL_HAS_INTRINSIC(is_aggregate) || defined(EA_COMPILER_GNUC) || (defined(_MSC_VER) && (_MSC_VER >= 1915)) // VS2017 15.8+ + #define EASTL_IS_AGGREGATE_AVAILABLE 1 +#else + #define EASTL_IS_AGGREGATE_AVAILABLE 0 +#endif + + +/// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR +/// This feature define allows users to toggle the problematic eastl::pair implicit +/// single element constructor. +#ifndef EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR + #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0 +#endif + +/// EASTL_SYSTEM_BIG_ENDIAN_STATEMENT +/// EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT +/// These macros allow you to write endian specific macros as statements. +/// This allows endian specific code to be macro expanded from within other macros +/// +#if defined(EA_SYSTEM_BIG_ENDIAN) + #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) __VA_ARGS__ +#else + #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) +#endif + +#if defined(EA_SYSTEM_LITTLE_ENDIAN) + #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) __VA_ARGS__ +#else + #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) +#endif + +/// EASTL_CONSTEXPR_BIT_CAST_SUPPORTED +/// eastl::bit_cast, in order to be implemented as constexpr, requires explicit compiler support. +/// This macro defines whether it's possible for bit_cast to be constexpr. +/// +#if (defined(EA_COMPILER_MSVC) && defined(EA_COMPILER_MSVC_VERSION_14_26) && EA_COMPILER_VERSION >= EA_COMPILER_MSVC_VERSION_14_26) \ + || EA_COMPILER_HAS_BUILTIN(__builtin_bit_cast) + #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 1 +#else + #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 0 +#endif + +// EASTL deprecation macros: +// +// EASTL_DEPRECATIONS_FOR_2024_APRIL +// This macro is provided as a means to disable warnings temporarily (in particular if a user is compiling with warnings as errors). +// All deprecations raised by this macro (when it is EA_ENABLED) are scheduled for removal approximately April 2024. 
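+// For example (an editorial sketch, assuming the EA_DISABLED counterpart of
+// EABase's EA_ENABLED/EA_IS_ENABLED idiom): a project that compiles with
+// warnings-as-errors can temporarily opt out on the compiler command line:
+//     -DEASTL_DEPRECATIONS_FOR_2024_APRIL=EA_DISABLED
+// after which EASTL_REMOVE_AT_2024_APRIL expands to nothing instead of EA_DEPRECATED.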
+ +#ifndef EASTL_DEPRECATIONS_FOR_2024_APRIL + #if defined(EA_DEPRECATIONS_FOR_2024_APRIL) + #define EASTL_DEPRECATIONS_FOR_2024_APRIL EA_DEPRECATIONS_FOR_2024_APRIL + #else + #define EASTL_DEPRECATIONS_FOR_2024_APRIL EA_ENABLED + #endif +#endif + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + #define EASTL_REMOVE_AT_2024_APRIL EA_DEPRECATED +#else + #define EASTL_REMOVE_AT_2024_APRIL +#endif + +// EASTL_DEPRECATIONS_FOR_2024_SEPT +// This macro is provided as a means to disable warnings temporarily (in particular if a user is compiling with warnings +// as errors). All deprecations raised by this macro (when it is EA_ENABLED) are scheduled for removal approximately +// September 2024. + +#ifndef EASTL_DEPRECATIONS_FOR_2024_SEPT + #if defined(EA_DEPRECATIONS_FOR_2024_SEPT) + #define EASTL_DEPRECATIONS_FOR_2024_SEPT EA_DEPRECATIONS_FOR_2024_SEPT + #else + #define EASTL_DEPRECATIONS_FOR_2024_SEPT EA_ENABLED + #endif +#endif + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_SEPT) + #define EASTL_REMOVE_AT_2024_SEPT EA_DEPRECATED +#else + #define EASTL_REMOVE_AT_2024_SEPT +#endif + +// For internal (to EASTL) use only (ie. tests). +#define EASTL_INTERNAL_DISABLE_DEPRECATED() \ + EA_DISABLE_VC_WARNING(4996); \ + EA_DISABLE_CLANG_WARNING(-Wdeprecated-declarations); \ + EA_DISABLE_GCC_WARNING(-Wdeprecated-declarations); + +// For internal (to EASTL) use only (ie. tests). +#define EASTL_INTERNAL_RESTORE_DEPRECATED() \ + EA_RESTORE_CLANG_WARNING(); \ + EA_RESTORE_VC_WARNING(); \ + EA_RESTORE_GCC_WARNING(); + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/copy_help.h b/external/EASTL/include/EASTL/internal/copy_help.h new file mode 100644 index 00000000..a7e5c0d1 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/copy_help.h @@ -0,0 +1,221 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_COPY_HELP_H +#define EASTL_INTERNAL_COPY_HELP_H + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include // memcpy, memcmp, memmove + + +namespace eastl +{ + /// move / move_n / move_backward + /// copy / copy_n / copy_backward + /// + /// We want to optimize move, move_n, move_backward, copy, copy_backward, copy_n to do memmove operations + /// when possible. + /// + /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy + /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy + /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward + /// allow output begin overlap. Despite this it might be useful to use memcpy for any platforms where + /// memcpy is significantly faster than memmove, and since in most cases the copy/move operation in fact + /// doesn't target overlapping memory and so memcpy would be usable. + /// + /// We can use memmove/memcpy if the following hold true: + /// InputIterator and OutputIterator have the same value type. + /// InputIterator and OutputIterator are of type contiguous_iterator_tag or simply are pointers (the two are virtually synonymous). + /// is_trivially_copyable::value is true. i.e. 
from the standard (http://www.eel.is/c++draft/basic.types.general#2):
+    ///          For any object (other than a potentially-overlapping subobject) of trivially copyable type T, whether or not the object
+    ///          holds a valid value of type T, the underlying bytes making up the object can be copied into an array of char, unsigned char,
+    ///          or std::byte [footnote: By using, for example, the library functions std::memcpy or std::memmove].
+    ///
+    ///      copy normally differs from move, but there is a case where copy is the same as move: when copy is
+    ///      used with a move_iterator. We handle that case here by detecting that copy is being done with a
+    ///      move_iterator and redirect it to move (which can take advantage of memmove/memcpy).
+
+    // Implementation for moving/copying both trivial and non-trivial data via a lesser iterator than random-access.
+    template
+    struct move_and_copy_helper
+    {
+        template <typename InputIterator, typename OutputIterator>
+        static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+        {
+            for(; first != last; ++result, ++first)
+                *result = *first;
+            return result;
+        }
+    };
+
+    // Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
+    // This specialization converts the random access InputIterator last-first to an integral type. There's no simple way for us to take advantage of a random access output iterator,
+    // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow.
+    template <>
+    struct move_and_copy_helper
+    {
+        template <typename InputIterator, typename OutputIterator>
+        static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+        {
+            typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+            for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+                *result = *first;
+
+            return result;
+        }
+    };
+
+    // Specialization for moving non-trivial data via a lesser iterator than random-access.
+    template
+    struct move_and_copy_helper
+    {
+        template <typename InputIterator, typename OutputIterator>
+        static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+        {
+            for(; first != last; ++result, ++first)
+                *result = eastl::move(*first);
+            return result;
+        }
+    };
+
+    // Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
+    template <>
+    struct move_and_copy_helper
+    {
+        template <typename InputIterator, typename OutputIterator>
+        static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+        {
+            typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+            for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+                *result = eastl::move(*first);
+
+            return result;
+        }
+    };
+
+    // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this.
+    template
+    struct move_and_copy_helper
+    {
+        template <typename T>
+        static T* move_or_copy(const T* first, const T* last, T* result)
+        {
+            if (EASTL_UNLIKELY(first == last))
+                return result;
+
+            // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove.
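+            // (Editorial note on the line below: the byte count is computed from the
+            // pointer difference, and adding (last - first) to the returned T* yields
+            // the end of the output range, matching the return convention of copy/move.)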
+ return (T*)memmove(result, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first); + } + }; + + + namespace internal { + // This exists to handle the case when EASTL_ITC_NS is `std` + // and the C++ version is older than C++20, in this case + // std::contiguous_iterator_tag does not exist so we can't use + // is_same<> directly. + #if !EASTL_STD_ITERATOR_CATEGORY_ENABLED || defined(EA_COMPILER_CPP20_ENABLED) + template + using is_contiguous_iterator = eastl::is_same; + #else + template + using is_contiguous_iterator = eastl::false_type; + #endif + + template + struct can_be_memmoved_helper { + using IIC = typename eastl::iterator_traits::iterator_category; + using OIC = typename eastl::iterator_traits::iterator_category; + using value_type_input = typename eastl::iterator_traits::value_type; + using value_type_output = typename eastl::iterator_traits::value_type; + + static constexpr bool value = eastl::is_trivially_copyable::value && + eastl::is_same::value && + (eastl::is_pointer::value || is_contiguous_iterator::value) && + (eastl::is_pointer::value || is_contiguous_iterator::value); + + }; + } + + template + inline OutputIterator move_and_copy_chooser(InputIterator first, InputIterator last, OutputIterator result) + { + typedef typename eastl::iterator_traits::iterator_category IIC; + + const bool canBeMemmoved = internal::can_be_memmoved_helper::value; + + // Need to choose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self. + return eastl::move_and_copy_helper::move_or_copy(first, last, result); + } + + + // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator > (i.e. doubly-wrapped). + template + EASTL_REMOVE_AT_2024_SEPT inline OutputIterator move_and_copy_unwrapper(InputIterator first, InputIterator last, OutputIterator result) + { + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'unwrap_iterator': was declared deprecated + return OutputIterator(eastl::move_and_copy_chooser(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(result))); // Have to convert to OutputIterator because unwrap_iterator(result) could be a T* + EASTL_INTERNAL_RESTORE_DEPRECATED() + } + + + /// move + /// + /// After this operation the elements in the moved-from range will still contain valid values of the + /// appropriate type, but not necessarily the same values as before the move. + /// Returns the end of the result range. + /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers. + /// Note: if result is within [first, last), move_backward must be used instead of move. + /// + /// Example usage: + /// eastl::move(myArray.begin(), myArray.end(), myDestArray.begin()); + /// + /// Reference implementation: + /// template + /// OutputIterator move(InputIterator first, InputIterator last, OutputIterator result) + /// { + /// while(first != last) + /// *result++ = eastl::move(*first++); + /// return result; + /// } + + template + inline OutputIterator move(InputIterator first, InputIterator last, OutputIterator result) + { + return eastl::move_and_copy_chooser(first, last, result); + } + + + /// copy + /// + /// Effects: Copies elements in the range [first, last) into the range [result, result + (last - first)) + /// starting from first and proceeding to last. For each nonnegative integer n < (last - first), + /// performs *(result + n) = *(first + n). 
+    ///
+    /// Returns: result + (last - first). That is, returns the end of the result. Note that this
+    /// is different from how memmove/memcpy work, as they return the beginning of the result.
+    ///
+    /// Requires: result shall not be in the range [first, last). But the end of the result range
+    /// may in fact be within the input range.
+    ///
+    /// Complexity: Exactly 'last - first' assignments.
+    ///
+    template <typename InputIterator, typename OutputIterator>
+    inline OutputIterator copy(InputIterator first, InputIterator last, OutputIterator result)
+    {
+        return eastl::move_and_copy_chooser(first, last, result);
+    }
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_COPY_HELP_H
diff --git a/external/EASTL/include/EASTL/internal/enable_shared.h b/external/EASTL/include/EASTL/internal/enable_shared.h
new file mode 100644
index 00000000..ac5f0729
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/enable_shared.h
@@ -0,0 +1,83 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_ENABLE_SHARED_H
+#define EASTL_INTERNAL_ENABLE_SHARED_H
+
+
+#include <EASTL/internal/config.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+namespace eastl
+{
+
+    /// enable_shared_from_this
+    ///
+    /// This is a helper mixin class that allows you to make any class
+    /// export a shared_ptr instance that is associated with the class
+    /// instance. Any class that inherits from this class gets two functions:
+    ///     shared_ptr<T> shared_from_this();
+    ///     shared_ptr<const T> shared_from_this() const;
+    /// If you call shared_from_this, you get back a shared_ptr that
+    /// refers to the class. A second call to shared_from_this returns
+    /// another shared_ptr that is shared with the first one.
+    ///
+    /// The trick that happens which is not so obvious here (and which is
+    /// not mentioned at all in the Boost documentation of their version
+    /// of this) is that the shared_ptr constructor detects that the
+    /// class has an enable_shared_from_this mixin and sets up this system
+    /// automatically for the user. This is done with template tricks.
+    ///
+    /// For some additional explanation, see the Boost documentation for
+    /// their description of their version of enable_shared_from_this.
+    ///
+    template <typename T>
+    class enable_shared_from_this
+    {
+    public:
+        shared_ptr<T> shared_from_this()
+            { return shared_ptr<T>(mWeakPtr); }
+
+        shared_ptr<const T> shared_from_this() const
+            { return shared_ptr<const T>(mWeakPtr); }
+
+        weak_ptr<T> weak_from_this()
+            { return mWeakPtr; }
+
+        weak_ptr<const T> weak_from_this() const
+            { return mWeakPtr; }
+
+    public: // This is public because the alternative fails on some compilers that we need to support.
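+        // (Editorial note) The shared_ptr constructor is what populates mWeakPtr
+        // when it detects this base class; user code never assigns it directly.
+        // Usage sketch with a hypothetical Widget type:
+        //     struct Widget : public eastl::enable_shared_from_this<Widget> { };
+        //     eastl::shared_ptr<Widget> p(new Widget);              // ctor wires up mWeakPtr
+        //     eastl::shared_ptr<Widget> q = p->shared_from_this();  // shares ownership with p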
+        mutable weak_ptr<T> mWeakPtr;
+
+    protected:
+        template <typename U> friend class shared_ptr;
+
+        EA_CONSTEXPR enable_shared_from_this() EA_NOEXCEPT
+            { }
+
+        enable_shared_from_this(const enable_shared_from_this&) EA_NOEXCEPT
+            { }
+
+        enable_shared_from_this& operator=(const enable_shared_from_this&) EA_NOEXCEPT
+            { return *this; }
+
+        ~enable_shared_from_this()
+            { }
+
+    }; // enable_shared_from_this
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/internal/fill_help.h b/external/EASTL/include/EASTL/internal/fill_help.h
new file mode 100644
index 00000000..07e3b62d
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/fill_help.h
@@ -0,0 +1,484 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FILL_HELP_H
+#define EASTL_INTERNAL_FILL_HELP_H
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+    #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+#include <intrin.h>
+#endif
+
+namespace eastl
+{
+    // fill
+    //
+    // We implement some fill helper functions in order to allow us to optimize it
+    // where possible.
+    //
+    template <bool bIsScalar>
+    struct fill_imp
+    {
+        template <typename ForwardIterator, typename T>
+        static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+        {
+            // The C++ standard doesn't specify whether we need to create a temporary
+            // or not, but all std STL implementations are written like what we have here.
+            for(; first != last; ++first)
+                *first = value;
+        }
+    };
+
+    template <>
+    struct fill_imp<true>
+    {
+        template <typename ForwardIterator, typename T>
+        static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+        {
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            // We create a temp and fill from that because value might alias to the
+            // destination range and so the compiler would be forced into generating
+            // less efficient code.
+            for(const T temp = value; first != last; ++first)
+            {
+                EA_UNUSED(temp);
+                *first = static_cast<value_type>(temp);
+            }
+        }
+    };
+
+    /// fill
+    ///
+    /// fill is like memset in that it assigns a single value repeatedly to a
+    /// destination range. It allows for any type of iterator (not just an array)
+    /// and the source value can be any type, not just a byte.
+    /// Note that the source value (which is a reference) can come from within
+    /// the destination range.
+    ///
+    /// Effects: Assigns value through all the iterators in the range [first, last).
+    ///
+    /// Complexity: Exactly 'last - first' assignments.
+    ///
+    /// Note: The C++ standard doesn't specify anything about the value parameter
+    /// coming from within the first-last range. All std STL implementations act
+    /// as if the standard specifies that value must not come from within this range.
+    ///
+    template <typename ForwardIterator, typename T>
+    inline void fill(ForwardIterator first, ForwardIterator last, const T& value)
+    {
+        eastl::fill_imp< is_scalar<T>::value >::do_fill(first, last, value);
+
+        // Possibly better implementation, as it will deal with small PODs as well as scalars:
+        // bEasyCopy is true if the type has a trivial constructor (e.g. is a POD) and if
+        // it is small. Thus any built-in type or any small user-defined struct will qualify.
+ //const bool bEasyCopy = eastl::type_and::value, + // eastl::integral_constant::value; + //eastl::fill_imp::do_fill(first, last, value); + + } + + #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline void fill(uint64_t* first, uint64_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint64_t value = (uint64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int64_t* first, int64_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int64_t value = (int64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + #endif + + template + inline void fill(uint32_t* first, uint32_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint32_t value = (uint32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int32_t* first, int32_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int32_t value = (int32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(uint16_t* first, uint16_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint16_t value = (uint16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int16_t* first, int16_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int16_t value = (int16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline void fill(uint64_t* first, uint64_t* last, Value c) + { + __stosq(first, (uint64_t)c, (size_t)(last - first)); + } + + template + inline void fill(int64_t* first, int64_t* last, Value c) + { + __stosq((uint64_t*)first, (uint64_t)c, (size_t)(last - first)); + } + #endif + + template + inline void fill(uint32_t* first, uint32_t* last, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first)); + } + + template + inline void fill(int32_t* first, int32_t* last, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first)); + } + + template + inline void fill(uint16_t* first, uint16_t* last, Value c) + { + __stosw(first, (uint16_t)c, (size_t)(last - first)); + } + + template + inline void fill(int16_t* first, int16_t* last, Value c) + { + __stosw((uint16_t*)first, (uint16_t)c, (size_t)(last - first)); + } + #endif + + + inline void fill(char* first, char* last, const char& c) // It's debateable whether we should use 'char& c' or 'char c' here. + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(char* first, char* last, const int c) // This is used for cases like 'fill(first, last, 0)'. 
+ { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(unsigned char* first, unsigned char* last, const unsigned char& c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(unsigned char* first, unsigned char* last, const int c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(signed char* first, signed char* last, const signed char& c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(signed char* first, signed char* last, const int c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler + inline void fill(bool* first, bool* last, const bool& b) + { + memset(first, (char)b, (size_t)(last - first)); + } + #endif + + + + + // fill_n + // + // We implement some fill helper functions in order to allow us to optimize it + // where possible. + // + template + struct fill_n_imp + { + template + static OutputIterator do_fill(OutputIterator first, Size n, const T& value) + { + for(; n-- > 0; ++first) + *first = value; + return first; + } + }; + + template <> + struct fill_n_imp + { + template + static OutputIterator do_fill(OutputIterator first, Size n, const T& value) + { + typedef typename eastl::iterator_traits::value_type value_type; + + // We create a temp and fill from that because value might alias to + // the destination range and so the compiler would be forced into + // generating less efficient code. + for(const T temp = value; n-- > 0; ++first) + *first = static_cast(temp); + return first; + } + }; + + /// fill_n + /// + /// The fill_n function is very much like memset in that a copies a source value + /// n times into a destination range. The source value may come from within + /// the destination range. + /// + /// Effects: Assigns value through all the iterators in the range [first, first + n). + /// + /// Complexity: Exactly n assignments. + /// + template + OutputIterator fill_n(OutputIterator first, Size n, const T& value) + { + return eastl::fill_n_imp::value>::do_fill(first, n, value); + } + + template + inline char* fill_n(char* first, Size n, const char& c) + { + return (char*)memset(first, (char)c, (size_t)n) + n; + } + + template + inline unsigned char* fill_n(unsigned char* first, Size n, const unsigned char& c) + { + return (unsigned char*)memset(first, (unsigned char)c, (size_t)n) + n; + } + + template + inline signed char* fill_n(signed char* first, Size n, const signed char& c) + { + return (signed char*)memset(first, (signed char)c, n) + (size_t)n; + } + + #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler + template + inline bool* fill_n(bool* first, Size n, const bool& b) + { + return (bool*)memset(first, (char)b, n) + (size_t)n; + } + #endif + + #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline uint64_t* fill_n(uint64_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint64_t value = (uint64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. 
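+            // (More precisely: the "+D" constraint binds first to RDI and "+c" binds
+            // count to RCX; rep stosq advances RDI past the filled words while counting
+            // RCX down to zero, so first now points one past the last element written.)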
+ } + + + template + inline int64_t* fill_n(int64_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int64_t value = (int64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + #endif + + template + inline uint32_t* fill_n(uint32_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint32_t value = (uint32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline int32_t* fill_n(int32_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int32_t value = (int32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline uint16_t* fill_n(uint16_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint16_t value = (uint16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline int16_t* fill_n(int16_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int16_t value = (int16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline uint64_t* fill_n(uint64_t* first, Size n, Value c) + { + __stosq(first, (uint64_t)c, (size_t)n); + return first + n; + } + + template + inline int64_t* fill_n(int64_t* first, Size n, Value c) + { + __stosq((uint64_t*)first, (uint64_t)c, (size_t)n); + return first + n; + } + #endif + + template + inline uint32_t* fill_n(uint32_t* first, Size n, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)n); + return first + n; + } + + template + inline int32_t* fill_n(int32_t* first, Size n, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)n); + return first + n; + } + + template + inline uint16_t* fill_n(uint16_t* first, Size n, Value c) + { + __stosw(first, (uint16_t)c, (size_t)n); + return first + n; + } + + template + inline int16_t* fill_n(int16_t* first, Size n, Value c) + { + __stosw((uint16_t*)first, (uint16_t)c, (size_t)n); + return first + n; + } + #endif + +} // namespace eastl + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/internal/fixed_pool.h b/external/EASTL/include/EASTL/internal/fixed_pool.h new file mode 100644 index 00000000..9f252d24 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/fixed_pool.h @@ -0,0 +1,1631 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the following +// aligned_buffer +// fixed_pool_base +// fixed_pool +// fixed_pool_with_overflow +// fixed_hashtable_allocator +// fixed_vector_allocator +// fixed_swap +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_FIXED_POOL_H +#define EASTL_INTERNAL_FIXED_POOL_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include + + +EA_DISABLE_ALL_VC_WARNINGS(); +#include +EA_RESTORE_ALL_VC_WARNINGS(); + +// 4275 - non dll-interface class used as base for DLL-interface classkey 'identifier' +EA_DISABLE_VC_WARNING(4275); + + +namespace eastl +{ + + /// EASTL_FIXED_POOL_DEFAULT_NAME + /// + /// Defines a default allocator name in the absence of a user-provided name. + /// + #ifndef EASTL_FIXED_POOL_DEFAULT_NAME + #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool". + #endif + + + + /////////////////////////////////////////////////////////////////////////// + // aligned_buffer + /////////////////////////////////////////////////////////////////////////// + + /// aligned_buffer + /// + /// This is useful for creating a buffer of the same size and alignment + /// of a given struct or class. This is useful for creating memory pools + /// that support both size and alignment requirements of stored objects + /// but without wasting space in over-allocating. + /// + /// Note that we implement this via struct specializations, as some + /// compilers such as VC++ do not support specification of alignments + /// in any way other than via an integral constant. + /// + /// Example usage: + /// struct Widget{ }; // This class has a given size and alignment. + /// + /// Declare a char buffer of equal size and alignment to Widget. + /// aligned_buffer mWidgetBuffer; + /// + /// Declare an array this time. 
+	/// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetArray[15];
+	///
+	typedef char EASTL_MAY_ALIAS aligned_buffer_char;
+
+	template <size_t size, size_t alignment>
+	struct aligned_buffer { aligned_buffer_char buffer[size]; };
+
+	template <size_t size>
+	struct aligned_buffer<size, 2> { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 4> { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 8> { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 16> { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 32> { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 64> { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 128> { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 256> { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 512> { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };
+
+	template <size_t size>
+	struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
+
+
+
+
+	///////////////////////////////////////////////////////////////////////////
+	// fixed_pool_base
+	///////////////////////////////////////////////////////////////////////////
+
+	/// fixed_pool_base
+	///
+	/// This is a base class for the implementation of fixed-size pools.
+	/// In particular, the fixed_pool and fixed_pool_with_overflow classes
+	/// are based on fixed_pool_base.
+	///
+	struct fixed_pool_base
+	{
+	public:
+		/// fixed_pool_base
+		///
+		fixed_pool_base(void* pMemory = NULL)
+			: mpHead((Link*)pMemory)
+			, mpNext((Link*)pMemory)
+			, mpCapacity((Link*)pMemory)
+			, mnNodeSize(0) // This is normally set in the init function.
+		{
+			#if EASTL_FIXED_SIZE_TRACKING_ENABLED
+				mnCurrentSize = 0;
+				mnPeakSize = 0;
+			#endif
+		}
+
+
+		/// fixed_pool_base
+		///
+		// Disabled because the default is sufficient. While it normally makes no sense to deep copy
+		// this data, our usage of this class is such that this is OK and wanted.
+		//
+		// fixed_pool_base(const fixed_pool_base& x)
+		// {
+		// }
+
+
+		/// operator=
+		///
+		fixed_pool_base& operator=(const fixed_pool_base&)
+		{
+			// By design we do nothing. We don't attempt to deep-copy member data.
+			return *this;
+		}
+
+
+		/// init
+		///
+		/// Initializes a fixed_pool with a given set of parameters.
+		/// You cannot call this function twice else the resulting
+		/// behaviour will be undefined. You can only call this function
+		/// after constructing the fixed_pool with the default constructor.
+		///
+		EASTL_API void init(void* pMemory, size_t memorySize, size_t nodeSize,
+							size_t alignment, size_t alignmentOffset = 0);
+
+
+		/// peak_size
+		///
+		/// Returns the maximum number of outstanding allocations there have been
+		/// at any one time. This represents a high water mark for the allocation count.
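+		/// Example (editor's sketch using the fixed_pool class defined further below;
+		/// the node size and buffer size are hypothetical, and the value read back is
+		/// 0 unless EASTL_FIXED_SIZE_TRACKING_ENABLED is defined):
+		///
+		///     char buffer[32 * 8];
+		///     eastl::fixed_pool pool(buffer, sizeof(buffer), 8, 8);
+		///     void* p0 = pool.allocate();
+		///     void* p1 = pool.allocate();
+		///     pool.deallocate(p0);
+		///     pool.allocate();                 // reuses p0's node
+		///     size_t peak = pool.peak_size();  // 2: at most two nodes were live at once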
+ /// + size_t peak_size() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + return mnPeakSize; + #else + return 0; + #endif + } + + + /// can_allocate + /// + /// Returns true if there are any free links. + /// + bool can_allocate() const + { + return (mpHead != NULL) || (mpNext != mpCapacity); + } + + public: + /// Link + /// Implements a singly-linked list. + struct Link + { + Link* mpNext; + }; + + Link* mpHead; + Link* mpNext; + Link* mpCapacity; + size_t mnNodeSize; + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + uint32_t mnCurrentSize; /// Current number of allocated nodes. + uint32_t mnPeakSize; /// Max number of allocated nodes at any one time. + #endif + + }; // fixed_pool_base + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_pool + /////////////////////////////////////////////////////////////////////////// + + /// fixed_pool + /// + /// Implements a simple fixed pool allocator for use by fixed-size containers. + /// This is not a generic eastl allocator which can be plugged into an arbitrary + /// eastl container, as it simplifies some functions are arguments for the + /// purpose of efficiency. + /// + class EASTL_API fixed_pool : public fixed_pool_base + { + public: + /// fixed_pool + /// + /// Default constructor. User usually will want to call init() after + /// constructing via this constructor. The pMemory argument is for the + /// purposes of temporarily storing a pointer to the buffer to be used. + /// Even though init may have a pMemory argument, this arg is useful + /// for temporary storage, as per copy construction. + /// + fixed_pool(void* pMemory = NULL) + : fixed_pool_base(pMemory) + { + } + + + /// fixed_pool + /// + /// Constructs a fixed_pool with a given set of parameters. + /// + fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + } + + + /// fixed_pool + /// + // Disabled because the default is sufficient. While it normally makes no sense to deep copy + // this data, our usage of this class is such that this is OK and wanted. + // + // fixed_pool(const fixed_pool& x) + // { + // } + + + /// operator= + /// + fixed_pool& operator=(const fixed_pool&) + { + // By design we do nothing. We don't attempt to deep-copy member data. + return *this; + } + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate() + { + Link* pLink = mpHead; + + if(pLink) // If we have space... + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + mpHead = pLink->mpNext; + return pLink; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + pLink = mpNext; + + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + return pLink; + } + + return NULL; + } + } + + void* allocate(size_t /*alignment*/, size_t /*offset*/) + { + return allocate(); + } + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. 
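+		/// (Editor's note: freed nodes are recycled in LIFO order. deallocate(), below,
+		/// pushes the node onto the mpHead free list, and allocate() pops that list
+		/// before consuming fresh space between mpNext and mpCapacity. A sketch with
+		/// hypothetical sizes:
+		///
+		///     char buffer[4 * 16];
+		///     eastl::fixed_pool pool(buffer, sizeof(buffer), 16, 8);
+		///     void* a = pool.allocate();   // carved from the untouched region via mpNext
+		///     pool.deallocate(a);          // a becomes the free-list head
+		///     void* b = pool.allocate();   // b == a: popped from the free list
+		/// )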
+ /// + void deallocate(void* p) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return EASTL_FIXED_POOL_DEFAULT_NAME; + } + + + void set_name(const char*) + { + // Nothing to do. We don't allocate memory. + } + + }; // fixed_pool + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_pool_with_overflow + /////////////////////////////////////////////////////////////////////////// + + /// fixed_pool_with_overflow + /// + template + class fixed_pool_with_overflow : public fixed_pool_base + { + public: + typedef OverflowAllocator overflow_allocator_type; + + + fixed_pool_with_overflow(void* pMemory = NULL) + : fixed_pool_base(pMemory), + mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME) + { + // Leave mpPoolBegin, mpPoolEnd uninitialized. + } + + + fixed_pool_with_overflow(void* pMemory, const overflow_allocator_type& allocator) + : fixed_pool_base(pMemory), + mOverflowAllocator(allocator) + { + // Leave mpPoolBegin, mpPoolEnd uninitialized. + } + + + fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset, + const overflow_allocator_type& allocator) + : mOverflowAllocator(allocator) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + // Disabled because the default is sufficient. While it normally makes no sense to deep copy + // this data, our usage of this class is such that this is OK and wanted. + // + //fixed_pool_with_overflow(const fixed_pool_with_overflow& x) + //{ + // ... 
+ //} + + + fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x) + { + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + + void init(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + void* allocate() + { + void* p = NULL; + Link* pLink = mpHead; + + if(pLink) + { + // Unlink from chain + p = pLink; + mpHead = pLink->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + p = pLink = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + } + else + p = mOverflowAllocator.allocate(mnNodeSize); + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + + void* allocate(size_t alignment, size_t alignmentOffset) + { + void* p = NULL; + Link* pLink = mpHead; + + if (pLink) + { + // Unlink from chain + p = pLink; + mpHead = pLink->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if (mpNext != mpCapacity) + { + p = pLink = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext)+mnNodeSize); + } + else + { + p = allocate_memory(mOverflowAllocator, mnNodeSize, alignment, alignmentOffset); + EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + } + + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if (p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + void deallocate(void* p) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + if((p >= mpPoolBegin) && (p < mpCapacity)) + { + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + else + mOverflowAllocator.deallocate(p, (size_t)mnNodeSize); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const + { + return mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() + { + return mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& overflowAllocator) + { + mOverflowAllocator = overflowAllocator; + } + public: + OverflowAllocator mOverflowAllocator; + void* mpPoolBegin; // Ideally we wouldn't need this member variable. he problem is that the information about the pool buffer and object size is stored in the owning container and we can't have access to it without increasing the amount of code we need and by templating more code. It may turn out that simply storing data here is smaller in the end. + + }; // fixed_pool_with_overflow + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_node_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_node_allocator + /// + /// Note: This class was previously named fixed_node_pool, but was changed because this name + /// was inconsistent with the other allocators here which ended with _allocator. 
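+	/// (Editor's note on fixed_pool_with_overflow, defined above: when the local
+	/// buffer is exhausted, allocate() falls back to the overflow allocator, and
+	/// deallocate() routes each pointer by range-checking it against
+	/// [mpPoolBegin, mpCapacity). A hedged sketch, assuming the default
+	/// overflow-allocator template argument and hypothetical sizes:
+	///
+	///     char buffer[2 * 32];
+	///     eastl::fixed_pool_with_overflow<> pool(buffer, sizeof(buffer), 32, 8);
+	///     void* a = pool.allocate();   // local buffer
+	///     void* b = pool.allocate();   // local buffer, now full
+	///     void* c = pool.allocate();   // heap, via the overflow allocator
+	///     pool.deallocate(c);          // outside the range -> overflow allocator
+	///     pool.deallocate(a);          // inside the range  -> local free list
+	/// )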
+ /// + /// Implements a fixed_pool with a given node count, alignment, and alignment offset. + /// fixed_node_allocator is like fixed_pool except it is templated on the node type instead + /// of being a generic allocator. All it does is pass allocations through to + /// the fixed_pool base. This functionality is separate from fixed_pool because there + /// are other uses for fixed_pool. + /// + /// We template on kNodeSize instead of node_type because the former allows for the + /// two different node_types of the same size to use the same template implementation. + /// + /// Template parameters: + /// nodeSize The size of the object to allocate. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_node_allocator + { + public: + typedef typename conditional, fixed_pool>::type pool_type; + typedef fixed_node_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + pool_type mPool; + + public: + //fixed_node_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + + fixed_node_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator) + { + } + + + /// fixed_node_allocator + /// + /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem + /// broken, as fixed pools cannot take over ownership of other fixed pools' memory. + /// However, we declare that this copy ctor can only ever be safely called when + /// the user has intentionally pre-seeded the source with the destination pointer. + /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg + /// problems with containers being their own allocators, without incurring any memory + /// costs or extra code costs. There's another reason for this: we very strongly want + /// to avoid full copying of instances of fixed_pool around, especially via the stack. + /// Larger pools won't even be able to fit on many machine's stacks. So this solution + /// is also a mechanism to prevent that situation from existing and being used. + /// Perhaps some day we'll find a more elegant yet costless way around this. 
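+		/// (Editor's note: concretely, the constructor below reads x.mPool.mpNext and
+		/// uses it as the buffer pointer for the new pool. The contract described
+		/// above therefore means the caller must have pointed that field at the
+		/// destination's own node buffer before copying; nothing here verifies it.
+		/// Hypothetical trace:
+		///
+		///     // caller has arranged: x.mPool.mpNext == dstNodeBuffer
+		///     // the copy's pool is then initialized over dstNodeBuffer, not x's buffer
+		/// )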
+ /// + fixed_node_allocator(const this_type& x) + : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator) + { + } + + + this_type& operator=(const this_type& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(); + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(alignment, offset); + } + + + void deallocate(void* p, size_t) + { + mPool.deallocate(p); + } + + + /// can_allocate + /// + /// Returns true if there are any free links. + /// + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + /// reset + /// + /// This function unilaterally resets the fixed pool back to a newly initialized + /// state. This is useful for using in tandem with container reset functionality. + /// + void reset(void* pNodeBuffer) + { + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + return mPool.mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + return mPool.mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mPool.mOverflowAllocator = allocator; + } + + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mPool.mOverflowAllocator = x.mPool.mOverflowAllocator; + } + + }; // fixed_node_allocator + + + // This is a near copy of the code above, with the only difference being + // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs, + // and the get_overflow_allocator / set_overflow_allocator functions. + template + class fixed_node_allocator + { + public: + typedef fixed_pool pool_type; + typedef fixed_node_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + pool_type mPool; + + public: + fixed_node_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + /// fixed_node_allocator + /// + /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem + /// broken, as fixed pools cannot take over ownership of other fixed pools' memory. + /// However, we declare that this copy ctor can only ever be safely called when + /// the user has intentionally pre-seeded the source with the destination pointer. 
+ /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg + /// problems with containers being their own allocators, without incurring any memory + /// costs or extra code costs. There's another reason for this: we very strongly want + /// to avoid full copying of instances of fixed_pool around, especially via the stack. + /// Larger pools won't even be able to fit on many machine's stacks. So this solution + /// is also a mechanism to prevent that situation from existing and being used. + /// Perhaps some day we'll find a more elegant yet costless way around this. + /// + fixed_node_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization. + : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + this_type& operator=(const this_type& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(); + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(alignment, offset); + } + + + void deallocate(void* p, size_t) + { + mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + void set_overflow_allocator(const overflow_allocator_type& /*allocator*/) + { + // We don't have an overflow allocator. + EASTL_ASSERT(false); + } + + + void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + // We don't have an overflow allocator. + } + + }; // fixed_node_allocator + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const fixed_node_allocator& a, + const fixed_node_allocator& b) + { + return (&a == &b); // They are only equal if they are the same object. + } + + + template + inline bool operator!=(const fixed_node_allocator& a, + const fixed_node_allocator& b) + { + return (&a != &b); // They are only equal if they are the same object. + } + + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_hashtable_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_hashtable_allocator + /// + /// Provides a base class for fixed hashtable allocations. + /// To consider: Have this inherit from fixed_node_allocator. 
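+	/// (Editor's note on the operator==/operator!= overloads above: fixed allocators
+	/// compare equal only when they are literally the same object, since each owns a
+	/// distinct buffer and memory from one can never be returned to another. A hedged
+	/// illustration, template arguments ordered per the parameter list documented for
+	/// fixed_node_allocator and the overflow allocator left at its default:
+	///
+	///     typedef eastl::fixed_node_allocator<16, 8, 8, 0, true> A;
+	///     char buf1[A::kBufferSize], buf2[A::kBufferSize];
+	///     A a1(buf1), a2(buf2);
+	///     // a1 == a1 is true; a1 == a2 is false: identity, not value, equality
+	/// )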
+ /// + /// Template parameters: + /// bucketCount The fixed number of hashtable buckets to provide. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_hashtable_allocator + { + public: + typedef typename conditional, fixed_pool>::type pool_type; + typedef fixed_hashtable_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket. + kBucketsSize = bucketCount * sizeof(void*), + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets. + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset, + kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes. + }; + + protected: + pool_type mPool; + void* mpBucketBuffer; + + public: + // Disabled because it causes compile conflicts. + //fixed_hashtable_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + fixed_hashtable_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + + fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator), + mpBucketBuffer(pBucketBuffer) + { + } + + + /// fixed_hashtable_allocator + /// + /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool. + /// See the discussion above in fixed_node_allocator for important information about this. + /// + fixed_hashtable_allocator(const this_type& x) + : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator), + mpBucketBuffer(x.mpBucketBuffer) + { + } + + + fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. 
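+			// (Editor's note: a sketch of the two call shapes this function serves;
+			// names and sizes are hypothetical. Node requests must be exactly
+			// kNodeSize and are served by the pool; bucket requests return the
+			// dedicated bucket buffer, or spill to the overflow allocator once the
+			// hashtable has grown past kBucketsSize:
+			//
+			//     void* node    = alloc.allocate(kNodeSize);                      // node path
+			//     void* buckets = alloc.allocate(bucketBytes, kAllocFlagBuckets); // bucket path
+			// )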
+ EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum. + + if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n); + return mPool.allocate(); + } + + // If bucket size no longer fits within local buffer... + if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize)) + return get_overflow_allocator().allocate(n); + + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. + if ((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n); + return mPool.allocate(alignment, offset); + } + + // If bucket size no longer fits within local buffer... + if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize)) + return get_overflow_allocator().allocate(n, alignment, offset); + + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void deallocate(void* p, size_t) + { + if(p != mpBucketBuffer) // If we are freeing a node and not buckets... + mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + // No need to modify mpBucketBuffer, as that is constant. + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const + { + return mPool.mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() + { + return mPool.mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mPool.mOverflowAllocator = allocator; + } + + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mPool.mOverflowAllocator = x.mPool.mOverflowAllocator; + } + + }; // fixed_hashtable_allocator + + + // This is a near copy of the code above, with the only difference being + // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs, + // and the get_overflow_allocator / set_overflow_allocator functions. + template + class fixed_hashtable_allocator + { + public: + typedef fixed_pool pool_type; + typedef fixed_hashtable_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket. + kBucketsSize = bucketCount * sizeof(void*), + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets. 
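+			// (Editor's note: a worked instance of the padding term above, with
+			// hypothetical numbers: nodeSize = 24, nodeCount = 10, nodeAlignment = 16
+			// and nodeAlignmentOffset = 0 give kNodesSize = 240 and
+			// kBufferSize = 240 + (24 - 1) + 0 = 263. The nodeSize-1 slack is enough
+			// to slide the first node up to a 16-byte boundary wherever the raw
+			// buffer happens to land, since sizeof >= alignof per the note above.)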
+ kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset, + kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes. + }; + + protected: + pool_type mPool; + void* mpBucketBuffer; + + public: + // Disabled because it causes compile conflicts. + //fixed_hashtable_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + fixed_hashtable_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + /// fixed_hashtable_allocator + /// + /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool. + /// See the discussion above in fixed_node_allocator for important information about this. + /// + fixed_hashtable_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization. + : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(x.mpBucketBuffer) + { + } + + + fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. + EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum. + if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away. + return mPool.allocate(); + } + + // Don't allow hashtable buckets to overflow in this case. + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. + if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away. + return mPool.allocate(alignment, offset); + } + + // Don't allow hashtable buckets to overflow in this case. + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void deallocate(void* p, size_t) + { + if(p != mpBucketBuffer) // If we are freeing a node and not buckets... 
+ mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + // No need to modify mpBucketBuffer, as that is constant. + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + void set_overflow_allocator(const overflow_allocator_type& /*allocator*/) + { + // We don't have an overflow allocator. + EASTL_ASSERT(false); + } + + void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + // We don't have an overflow allocator. + } + + }; // fixed_hashtable_allocator + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const fixed_hashtable_allocator& a, + const fixed_hashtable_allocator& b) + { + return (&a == &b); // They are only equal if they are the same object. + } + + + template + inline bool operator!=(const fixed_hashtable_allocator& a, + const fixed_hashtable_allocator& b) + { + return (&a != &b); // They are only equal if they are the same object. + } + + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_vector_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_vector_allocator + /// + /// Template parameters: + /// nodeSize The size of individual objects. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_vector_allocator + { + public: + typedef fixed_vector_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + overflow_allocator_type mOverflowAllocator; + void* mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation. + + public: + // Disabled because it causes compile conflicts. 
+ //fixed_vector_allocator(const char* pName = NULL) + //{ + // mOverflowAllocator.set_name(pName); + //} + + fixed_vector_allocator(void* pNodeBuffer = nullptr) + : mpPoolBegin(pNodeBuffer) + { + } + + fixed_vector_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mOverflowAllocator(allocator), mpPoolBegin(pNodeBuffer) + { + } + + fixed_vector_allocator(const fixed_vector_allocator& x) + : mOverflowAllocator(x.mOverflowAllocator), mpPoolBegin(x.mpPoolBegin) + { + } + + fixed_vector_allocator& operator=(const fixed_vector_allocator& x) + { + // We leave our mpPoolBegin variable alone. + + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + void* allocate(size_t n, int flags = 0) + { + return mOverflowAllocator.allocate(n, flags); + } + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + return mOverflowAllocator.allocate(n, alignment, offset, flags); + } + + void deallocate(void* p, size_t n) + { + if(p != mpPoolBegin) + mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation. + } + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + return mOverflowAllocator; + } + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + return mOverflowAllocator; + } + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mOverflowAllocator = allocator; + } + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mOverflowAllocator = x.mOverflowAllocator; + } + + }; // fixed_vector_allocator + + + template + class fixed_vector_allocator + { + public: + typedef fixed_vector_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + // Disabled because it causes compile conflicts. + //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version. + //{ + //} + + fixed_vector_allocator() + { + } + + fixed_vector_allocator(void* /*pNodeBuffer*/) + { + } + + fixed_vector_allocator(void* /*pNodeBuffer*/, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + { + } + + /// fixed_vector_allocator + /// + // Disabled because there is nothing to do. No member data. And the default for this is sufficient. + // fixed_vector_allocator(const fixed_vector_allocator&) + // { + // } + + // Disabled because there is nothing to do. No member data. + //fixed_vector_allocator& operator=(const fixed_vector_allocator& x) + //{ + // return *this; + //} + + void* allocate(size_t /*n*/, int /*flags*/ = 0) + { + EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space. 
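+			// (Editor's note: this path is reached only when a fixed container with
+			// bEnableOverflow == false outgrows its inline capacity; a hypothetical
+			// trigger:
+			//
+			//     eastl::fixed_vector<int, 4, false> v;
+			//     for(int i = 0; i < 5; ++i)
+			//         v.push_back(i); // the 5th push exhausts the buffer and lands here
+			// )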
+			EASTL_CRASH(); // We choose to crash here since the owning vector can't handle an allocator returning null. Better to crash earlier.
+			return NULL;
+		}
+
+		void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+		{
+			EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
+			EASTL_CRASH(); // We choose to crash here since the owning vector can't handle an allocator returning null. Better to crash earlier.
+			return NULL;
+		}
+
+		void deallocate(void* /*p*/, size_t /*n*/)
+		{
+		}
+
+		const char* get_name() const
+		{
+			return EASTL_FIXED_POOL_DEFAULT_NAME;
+		}
+
+		void set_name(const char* /*pName*/)
+		{
+		}
+
+		const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+		{
+			EASTL_ASSERT(false);
+			overflow_allocator_type* pNULL = NULL;
+			return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+		}
+
+		overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+		{
+			EASTL_ASSERT(false);
+			overflow_allocator_type* pNULL = NULL;
+			return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+		}
+
+		void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+		{
+			// We don't have an overflow allocator.
+			EASTL_ASSERT(false);
+		}
+
+		void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+		{
+			// We don't have an overflow allocator.
+		}
+
+	}; // fixed_vector_allocator
+
+
+	///////////////////////////////////////////////////////////////////////
+	// global operators
+	///////////////////////////////////////////////////////////////////////
+
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+	inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+						   const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+	{
+		return (&a == &b); // They are only equal if they are the same object.
+	}
+
+
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+	inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+						   const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+	{
+		return (&a != &b); // They are only equal if they are the same object.
+	}
+
+
+
+
+
+	///////////////////////////////////////////////////////////////////////////
+	// fixed_swap
+	///////////////////////////////////////////////////////////////////////////
+
+	/// fixed_swap
+	///
+	/// This function implements a swap suitable for fixed containers.
+	/// This is an issue because the size of fixed containers can be very
+	/// large, due to their having the container buffer within themselves.
+	/// Note that we are referring to sizeof(container) and not the total
+	/// sum of memory allocated by the container from the heap.
+	///
+	/// This implementation switches at compile time whether or not the
+	/// temporary is allocated on the stack or the heap as some compilers
+	/// will allocate the (large) stack frame regardless of which code
+	/// path is picked.
+	///
+	template <typename Container, bool UseHeapTemporary>
+	class fixed_swap_impl
+	{
+	public:
+		static void swap(Container& a, Container& b);
+	};
+
+
+	template <typename Container>
+	class fixed_swap_impl<Container, false>
+	{
+	public:
+		static void swap(Container& a, Container& b)
+		{
+			Container temp(EASTL_MOVE(a)); // Can't use global swap because that could
+			a = EASTL_MOVE(b);             // itself call this swap function in return.
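+			// (Editor's note: the line below completes the classic three-move
+			// rotation a -> temp, b -> a, temp -> b, deliberately avoiding the
+			// global swap, which could recurse back into fixed_swap.)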
+			b = EASTL_MOVE(temp);
+		}
+	};
+
+
+	template <typename Container>
+	class fixed_swap_impl<Container, true>
+	{
+	public:
+		static void swap(Container& a, Container& b)
+		{
+			EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
+			void* const pMemory = allocator.allocate(sizeof(a));
+
+			if(pMemory)
+			{
+				Container* pTemp = ::new(pMemory) Container(EASTL_MOVE(a));
+				a = EASTL_MOVE(b);
+				b = EASTL_MOVE(*pTemp);
+
+				pTemp->~Container();
+				allocator.deallocate(pMemory, sizeof(a));
+			}
+		}
+	};
+
+
+	template <typename Container>
+	void fixed_swap(Container& a, Container& b)
+	{
+		return fixed_swap_impl<Container, sizeof(Container) >= EASTL_MAX_STACK_USAGE>::swap(a, b);
+	}
+
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/external/EASTL/include/EASTL/internal/function.h b/external/EASTL/include/EASTL/internal/function.h
new file mode 100644
index 00000000..166f30d1
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/function.h
@@ -0,0 +1,171 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTION_H
+#define EASTL_FUNCTION_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/function_detail.h>
+
+namespace eastl
+{
+
+	/// EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+	///
+	/// Defines the size of the SSO buffer which is used to hold the specified capture state of the callable.
+	///
+	#ifndef EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+		#define EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE (2 * sizeof(void*))
+	#endif
+
+	static_assert(EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE >= sizeof(void*), "functor storage must be able to hold at least a pointer!");
+
+	template <typename>
+	class function;
+
+	template <typename R, typename... Args>
+	class function<R(Args...)> : public internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>
+	{
+	private:
+		using Base = internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>;
+	public:
+		using typename Base::result_type;
+
+		function() EA_NOEXCEPT = default;
+		function(std::nullptr_t p) EA_NOEXCEPT
+			: Base(p)
+		{
+		}
+
+		function(const function& other)
+			: Base(other)
+		{
+		}
+
+		function(function&& other)
+			: Base(eastl::move(other))
+		{
+		}
+
+		template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+		function(Functor functor)
+			: Base(eastl::move(functor))
+		{
+		}
+
+		~function() EA_NOEXCEPT = default;
+
+		function& operator=(const function& other)
+		{
+			Base::operator=(other);
+			return *this;
+		}
+
+		function& operator=(function&& other)
+		{
+			Base::operator=(eastl::move(other));
+			return *this;
+		}
+
+		function& operator=(std::nullptr_t p) EA_NOEXCEPT
+		{
+			Base::operator=(p);
+			return *this;
+		}
+
+		template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+		function& operator=(Functor&& functor)
+		{
+			Base::operator=(eastl::forward<Functor>(functor));
+			return *this;
+		}
+
+		template <typename Functor>
+		function& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+		{
+			Base::operator=(f);
+			return *this;
+		}
+
+		void swap(function& other) EA_NOEXCEPT
+		{
+			Base::swap(other);
+		}
+
+		explicit operator bool() const EA_NOEXCEPT
+		{
+			return Base::operator bool();
+		}
+
+		R operator ()(Args... 
args) const + { + return Base::operator ()(eastl::forward(args)...); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + return Base::target_type(); + } + + template + Functor* target() EA_NOEXCEPT + { + return Base::template target(); + } + + template + const Functor* target() const EA_NOEXCEPT + { + return Base::template target(); + } + #endif // EASTL_RTTI_ENABLED + }; + + template + bool operator==(const function& f, std::nullptr_t) EA_NOEXCEPT + { + return !f; + } +#if !defined(EA_COMPILER_HAS_THREE_WAY_COMPARISON) + template + bool operator==(std::nullptr_t, const function& f) EA_NOEXCEPT + { + return !f; + } + + template + bool operator!=(const function& f, std::nullptr_t) EA_NOEXCEPT + { + return !!f; + } + + template + bool operator!=(std::nullptr_t, const function& f) EA_NOEXCEPT + { + return !!f; + } +#endif + template + void swap(function& lhs, function& rhs) + { + lhs.swap(rhs); + } + +#ifdef __cpp_deduction_guides + template + function(ReturnType(*)(Args...)) -> function; + + template + function(Callable) -> function>; +#endif + +} // namespace eastl + +#endif // EASTL_FUNCTION_H diff --git a/external/EASTL/include/EASTL/internal/function_detail.h b/external/EASTL/include/EASTL/internal/function_detail.h new file mode 100644 index 00000000..2b82c3e7 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/function_detail.h @@ -0,0 +1,723 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FUNCTION_DETAIL_H +#define EASTL_FUNCTION_DETAIL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#if EASTL_RTTI_ENABLED + #include +#endif + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include + #include + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#define EASTL_EMPTY_FUNCTION_QUALIFIER + +// https://en.cppreference.com/w/cpp/language/noexcept_spec +// The noexcept-specification is a part of the function type in C++17 and above, but not before. 
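+// (Editor's note: concretely, under C++17 `void()` and `void() noexcept` are two
+// distinct function types, so the qualifier-expansion machinery below must stamp
+// out a separate PATTERN instantiation for the noexcept variant of every
+// const/volatile/ref combination; before C++17 the noexcept form adds nothing and
+// is omitted. A minimal illustration:
+//
+//     static_assert(!eastl::is_same_v<void(), void() noexcept>, "distinct in C++17");
+// )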
+#if defined(EA_COMPILER_CPP17_ENABLED) +#define EASTL_GENERATE_MEMBER_FUNCTION_NOEXCEPT_VARIANTS(PATTERN, OTHER_QUALIFIER) \ + PATTERN(OTHER_QUALIFIER EASTL_EMPTY_FUNCTION_QUALIFIER) \ + PATTERN(OTHER_QUALIFIER noexcept) +#else +#define EASTL_GENERATE_MEMBER_FUNCTION_NOEXCEPT_VARIANTS(PATTERN, OTHER_QUALIFIER) \ + PATTERN(OTHER_QUALIFIER EASTL_EMPTY_FUNCTION_QUALIFIER) +#endif + +#define EASTL_GENERATE_MEMBER_FUNCTION_REF_VARIANTS(PATTERN, OTHER_QUALIFIER) \ + EASTL_GENERATE_MEMBER_FUNCTION_NOEXCEPT_VARIANTS(PATTERN, OTHER_QUALIFIER EASTL_EMPTY_FUNCTION_QUALIFIER) \ + EASTL_GENERATE_MEMBER_FUNCTION_NOEXCEPT_VARIANTS(PATTERN, OTHER_QUALIFIER &) \ + EASTL_GENERATE_MEMBER_FUNCTION_NOEXCEPT_VARIANTS(PATTERN, OTHER_QUALIFIER &&) + +#define EASTL_GENERATE_MEMBER_FUNCTION_VOLATILE_VARIANTS(PATTERN, OTHER_QUALIFIER) \ + EASTL_GENERATE_MEMBER_FUNCTION_REF_VARIANTS(PATTERN, OTHER_QUALIFIER EASTL_EMPTY_FUNCTION_QUALIFIER) \ + EASTL_GENERATE_MEMBER_FUNCTION_REF_VARIANTS(PATTERN, OTHER_QUALIFIER volatile) + +#define EASTL_GENERATE_MEMBER_FUNCTION_CONST_VARIANTS(PATTERN) \ + EASTL_GENERATE_MEMBER_FUNCTION_VOLATILE_VARIANTS(PATTERN, EASTL_EMPTY_FUNCTION_QUALIFIER) \ + EASTL_GENERATE_MEMBER_FUNCTION_VOLATILE_VARIANTS(PATTERN, const) + +// Helper to generate all combination of qualifiers you can apply to member functions. +// PATTERN must be a macro that will receive as an argument a possible combination of qualifiers and generate a pattern from it. +#define EASTL_GENERATE_MEMBER_FUNCTION_VARIANTS(PATTERN) EASTL_GENERATE_MEMBER_FUNCTION_CONST_VARIANTS(PATTERN) + +namespace eastl +{ + #if EASTL_EXCEPTIONS_ENABLED + class bad_function_call : public std::exception + { + public: + bad_function_call() EA_NOEXCEPT = default; + + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { + return "bad function_detail call"; + } + }; + #endif + + namespace internal + { + class unused_class {}; + + union functor_storage_alignment + { + void (*unused_func_ptr)(void); + void (unused_class::*unused_func_mem_ptr)(void); + void* unused_ptr; + }; + + template + struct functor_storage + { + static_assert(SIZE_IN_BYTES >= 0, "local buffer storage cannot have a negative size!"); + template + Ret& GetStorageTypeRef() const + { + return *reinterpret_cast(const_cast(&storage[0])); + } + + union + { + functor_storage_alignment align; + char storage[SIZE_IN_BYTES]; + }; + }; + + template <> + struct functor_storage<0> + { + template + Ret& GetStorageTypeRef() const + { + return *reinterpret_cast(const_cast(&storage[0])); + } + + union + { + functor_storage_alignment align; + char storage[sizeof(functor_storage_alignment)]; + }; + }; + + template + struct is_functor_inplace_allocatable + { + static EA_CONSTEXPR bool value = + sizeof(Functor) <= sizeof(functor_storage) && + (eastl::alignment_of_v> % eastl::alignment_of_v) == 0; + }; + + + /// function_base_detail + /// + template + class function_base_detail + { + public: + using FunctorStorageType = functor_storage; + FunctorStorageType mStorage; + + enum ManagerOperations : int + { + MGROPS_DESTRUCT_FUNCTOR = 0, + MGROPS_COPY_FUNCTOR = 1, + MGROPS_MOVE_FUNCTOR = 2, + #if EASTL_RTTI_ENABLED + MGROPS_GET_TYPE_INFO = 3, + MGROPS_GET_FUNC_PTR = 4, + #endif + }; + + // Functor can be allocated inplace + template + class function_manager_base + { + public: + + static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT + { + return &(storage.template GetStorageTypeRef()); + } + + template + static void CreateFunctor(FunctorStorageType& storage, T&& functor) + { + ::new 
(GetFunctorPtr(storage)) Functor(eastl::forward(functor)); + } + + static void DestructFunctor(FunctorStorageType& storage) + { + GetFunctorPtr(storage)->~Functor(); + } + + static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from) + { + ::new (GetFunctorPtr(to)) Functor(*GetFunctorPtr(from)); + } + + static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT + { + ::new (GetFunctorPtr(to)) Functor(eastl::move(*GetFunctorPtr(from))); + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_DESTRUCT_FUNCTOR: + { + DestructFunctor(*static_cast(to)); + } + break; + case MGROPS_COPY_FUNCTOR: + { + CopyFunctor(*static_cast(to), + *static_cast(from)); + } + break; + case MGROPS_MOVE_FUNCTOR: + { + MoveFunctor(*static_cast(to), *static_cast(from)); + DestructFunctor(*static_cast(from)); + } + break; + default: + break; + } + return nullptr; + } + }; + + // Functor is allocated on the heap + template + class function_manager_base::value>::type> + { + public: + static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT + { + return storage.template GetStorageTypeRef(); + } + + static Functor*& GetFunctorPtrRef(const FunctorStorageType& storage) EA_NOEXCEPT + { + return storage.template GetStorageTypeRef(); + } + + template + static void CreateFunctor(FunctorStorageType& storage, T&& functor) + { + auto& allocator = *EASTLAllocatorDefault(); + Functor* func = static_cast(allocator.allocate(sizeof(Functor), alignof(Functor), 0)); + + #if EASTL_EXCEPTIONS_ENABLED + if (!func) + { + throw std::bad_alloc(); + } + #else + EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!"); + #endif + + ::new (static_cast(func)) Functor(eastl::forward(functor)); + GetFunctorPtrRef(storage) = func; + } + + static void DestructFunctor(FunctorStorageType& storage) + { + Functor* func = GetFunctorPtr(storage); + if (func) + { + auto& allocator = *EASTLAllocatorDefault(); + func->~Functor(); + allocator.deallocate(static_cast(func), sizeof(Functor)); + } + } + + static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from) + { + auto& allocator = *EASTLAllocatorDefault(); + Functor* func = static_cast(allocator.allocate(sizeof(Functor), alignof(Functor), 0)); + #if EASTL_EXCEPTIONS_ENABLED + if (!func) + { + throw std::bad_alloc(); + } + #else + EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!"); + #endif + ::new (static_cast(func)) Functor(*GetFunctorPtr(from)); + GetFunctorPtrRef(to) = func; + } + + static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT + { + Functor* func = GetFunctorPtr(from); + GetFunctorPtrRef(to) = func; + GetFunctorPtrRef(from) = nullptr; + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_DESTRUCT_FUNCTOR: + { + DestructFunctor(*static_cast(to)); + } + break; + case MGROPS_COPY_FUNCTOR: + { + CopyFunctor(*static_cast(to), + *static_cast(from)); + } + break; + case MGROPS_MOVE_FUNCTOR: + { + MoveFunctor(*static_cast(to), *static_cast(from)); + // Moved ptr, no need to destruct ourselves + } + break; + default: + break; + } + return nullptr; + } + }; + + template + class function_manager final : public function_manager_base + { + public: + using Base = function_manager_base; + + #if EASTL_RTTI_ENABLED + static void* GetTypeInfo() EA_NOEXCEPT + { + return 
reinterpret_cast(const_cast(&typeid(Functor))); + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_GET_TYPE_INFO: + { + return GetTypeInfo(); + } + break; + case MGROPS_GET_FUNC_PTR: + { + return static_cast(Base::GetFunctorPtr(*static_cast(to))); + } + break; + default: + { + return Base::Manager(to, from, ops); + } + break; + } + } + #endif // EASTL_RTTI_ENABLED + + /** + * NOTE: + * + * The order of arguments here is vital to the call optimization. Let's dig into why and look at some asm. + * We have two invoker signatures to consider: + * R Invoker(const FunctorStorageType& functor, Args... args) + * R Invoker(Args... args, const FunctorStorageType& functor) + * + * Assume we are using the Windows x64 Calling Convention where the first 4 arguments are passed into + * RCX, RDX, R8, R9. This optimization works for any Calling Convention, we are just using Windows x64 for + * this example. + * + * Given the following member function: void TestMemberFunc(int a, int b) + * RCX == this + * RDX == a + * R8 == b + * + * All three arguments to the function including the hidden this pointer, which in C++ is always the first argument + * are passed into the first three registers. + * The function call chain for eastl::function<>() is as follows: + * operator ()(this, Args... args) -> Invoker(Args... args, this->mStorage) -> StoredFunction(Args... arg) + * + * Let's look at what is happening at the asm level with the different Invoker function signatures and why. + * + * You will notice that operator ()() and Invoker() have the arguments reversed. operator ()() just directly calls + * to Invoker(), it is a tail call, so we force inline the call operator to ensure we directly call to the Invoker(). + * Most compilers always inline it anyways by default; have been instances where it doesn't even though the asm ends + * up being cheaper. + * call -> call -> call versus call -> call + * + * eastl::function = FunctionPointer + * + * Assume we have the above eastl::function object that holds a pointer to a function as the internal callable. + * + * Invoker(this->mStorage, Args... args) is called with the follow arguments in registers: + * RCX = this | RDX = a | R8 = b + * + * Inside Invoker() we use RCX to deference into the eastl::function object and get the function pointer to call. + * This function to call has signature Func(int, int) and thus requires its arguments in registers RCX and RDX. + * The compiler must shift all the arguments towards the left. The full asm looks something as follows. + * + * Calling Invoker: Inside Invoker: + * + * mov rcx, this mov rax, [rcx] + * mov rdx, a mov rcx, rdx + * mov r8, b mov rdx, r8 + * call [rcx + offset to Invoker] jmp [rax] + * + * Notice how the compiler shifts all the arguments before calling the callable and also we only use the this pointer + * to access the internal storage inside the eastl::function object. + * + * Invoker(Args... args, this->mStorage) is called with the following arguments in registers: + * RCX = a | RDX = b | R8 = this + * + * You can see we no longer have to shift the arguments down when going to call the internal stored callable. + * + * Calling Invoker: Inside Invoker: + * + * mov rcx, a mov rax, [r8] + * mov rdx, b jmp [rax] + * mov r8, this + * call [r8 + offset to Invoker] + * + * The generated asm does a straight tail jmp to the loaded function pointer. The arguments are already in the correct + * registers. 
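+			 *
+			 * A minimal source-level sketch of that call chain (editorial illustration; the lambda and
+			 * names below are hypothetical, not part of this file):
+			 *
+			 *     eastl::function<int(int, int)> fn = [](int x, int y) { return x + y; };
+			 *     int r = fn(1, 2); // operator()(this, 1, 2) -> Invoker(1, 2, mStorage) -> lambda(1, 2)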
+ * + * For Functors or Lambdas with no captures, this gives us another free register to use to pass arguments since the this + * is at the end, it can be passed onto the stack if we run out of registers. Since the callable has no captures; inside + * the Invoker(), we won't ever need to touch this thus we can just call the operator ()() or let the compiler inline it. + * + * For a callable with captures there is no perf hit since the callable in the common case is inlined and the pointer to the callable + * buffer is passed in a register which the compiler can use to access the captures. + * + * For eastl::function that a holds a pointer to member function. The this pointers is implicitly + * the first argument in the argument list, const T&, and the member function pointer will be called on that object. + * This prevents any argument shifting since the this for the member function pointer is already in RCX. + * + * This is why having this at the end of the argument list is important for generating efficient Invoker() thunks. + */ + static R Invoker(Args... args, const FunctorStorageType& functor) + { + return eastl::invoke(*Base::GetFunctorPtr(functor), eastl::forward(args)...); + } + }; + + function_base_detail() EA_NOEXCEPT = default; + ~function_base_detail() EA_NOEXCEPT = default; + }; + + #define EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, BASE, MYSELF) \ + typename eastl::enable_if_t && \ + !eastl::is_base_of_v> && \ + !eastl::is_same_v, MYSELF>> + + #define EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF) \ + EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF, MYSELF) + + + /// function_detail + /// + template + class function_detail; + + template + class function_detail : public function_base_detail + { + public: + using result_type = R; + + protected: + using Base = function_base_detail; + using FunctorStorageType = typename function_base_detail::FunctorStorageType; + using Base::mStorage; + + public: + function_detail() EA_NOEXCEPT = default; + function_detail(std::nullptr_t) EA_NOEXCEPT {} + + function_detail(const function_detail& other) + { + if (this != &other) + { + Copy(other); + } + } + + function_detail(function_detail&& other) + { + if (this != &other) + { + Move(eastl::move(other)); + } + } + + template + function_detail(Functor functor) + { + CreateForwardFunctor(eastl::move(functor)); + } + + ~function_detail() EA_NOEXCEPT + { + Destroy(); + } + + function_detail& operator=(const function_detail& other) + { + if (this != &other) + { + Destroy(); + Copy(other); + } + + return *this; + } + + function_detail& operator=(function_detail&& other) + { + if(this != &other) + { + Destroy(); + Move(eastl::move(other)); + } + + return *this; + } + + function_detail& operator=(std::nullptr_t) EA_NOEXCEPT + { + Destroy(); + mMgrFuncPtr = nullptr; + mInvokeFuncPtr = &DefaultInvoker; + + return *this; + } + + template + function_detail& operator=(Functor&& functor) + { + Destroy(); + CreateForwardFunctor(eastl::forward(functor)); + return *this; + } + + template + function_detail& operator=(eastl::reference_wrapper f) EA_NOEXCEPT + { + Destroy(); + CreateForwardFunctor(f); + return *this; + } + + void swap(function_detail& other) EA_NOEXCEPT + { + if(this == &other) + return; + + FunctorStorageType tempStorage; + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&tempStorage), static_cast(&other.mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + if (HaveManager()) + { + 
(void)(*mMgrFuncPtr)(static_cast(&other.mStorage), static_cast(&mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), static_cast(&tempStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + eastl::swap(mMgrFuncPtr, other.mMgrFuncPtr); + eastl::swap(mInvokeFuncPtr, other.mInvokeFuncPtr); + } + + explicit operator bool() const EA_NOEXCEPT + { + return HaveManager(); + } + + EASTL_FORCE_INLINE R operator ()(Args... args) const + { + return (*mInvokeFuncPtr)(eastl::forward(args)..., this->mStorage); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + if (HaveManager()) + { + void* ret = (*mMgrFuncPtr)(nullptr, nullptr, Base::ManagerOperations::MGROPS_GET_TYPE_INFO); + return *(static_cast(ret)); + } + return typeid(void); + } + + template + Functor* target() EA_NOEXCEPT + { + if (HaveManager() && target_type() == typeid(Functor)) + { + void* ret = (*mMgrFuncPtr)(static_cast(&mStorage), nullptr, + Base::ManagerOperations::MGROPS_GET_FUNC_PTR); + return ret ? static_cast(ret) : nullptr; + } + return nullptr; + } + + template + const Functor* target() const EA_NOEXCEPT + { + if (HaveManager() && target_type() == typeid(Functor)) + { + // Note: the const_cast on &mStorage is "safe" here because we're doing a + // MGROPS_GET_FUNC_PTR operation. We can't change the entire signature + // of mMgrFuncPtr because we use it to modify the storage with other + // operations. + const void* ret = (*mMgrFuncPtr)(static_cast(const_cast(&mStorage)), nullptr, + Base::ManagerOperations::MGROPS_GET_FUNC_PTR); + return ret ? static_cast(ret) : nullptr; + } + return nullptr; + } + #endif // EASTL_RTTI_ENABLED + + private: + bool HaveManager() const EA_NOEXCEPT + { + return (mMgrFuncPtr != nullptr); + } + + void Destroy() EA_NOEXCEPT + { + if (HaveManager()) + { + (void)(*mMgrFuncPtr)(static_cast(&mStorage), nullptr, + Base::ManagerOperations::MGROPS_DESTRUCT_FUNCTOR); + } + } + + void Copy(const function_detail& other) + { + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), + const_cast(static_cast(&other.mStorage)), + Base::ManagerOperations::MGROPS_COPY_FUNCTOR); + } + + mMgrFuncPtr = other.mMgrFuncPtr; + mInvokeFuncPtr = other.mInvokeFuncPtr; + } + + void Move(function_detail&& other) + { + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), static_cast(&other.mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + mMgrFuncPtr = other.mMgrFuncPtr; + mInvokeFuncPtr = other.mInvokeFuncPtr; + other.mMgrFuncPtr = nullptr; + other.mInvokeFuncPtr = &DefaultInvoker; + } + + template + void CreateForwardFunctor(Functor&& functor) + { + using DecayedFunctorType = typename eastl::decay::type; + using FunctionManagerType = typename Base::template function_manager; + + if (internal::is_null(functor)) + { + mMgrFuncPtr = nullptr; + mInvokeFuncPtr = &DefaultInvoker; + } + else + { + mMgrFuncPtr = &FunctionManagerType::Manager; + mInvokeFuncPtr = &FunctionManagerType::Invoker; + FunctionManagerType::CreateFunctor(mStorage, eastl::forward(functor)); + } + } + + private: + typedef void* (*ManagerFuncPtr)(void*, void*, typename Base::ManagerOperations); + typedef R (*InvokeFuncPtr)(Args..., const FunctorStorageType&); + + EA_DISABLE_GCC_WARNING(-Wreturn-type); + EA_DISABLE_CLANG_WARNING(-Wreturn-type); + EA_DISABLE_VC_WARNING(4716); // 'function' must return a value + // We cannot assume that R is default 
constructible.
+			// This function is called only when the function object CANNOT be called because it is empty,
+			// it will always throw or assert so we never use the return value anyways and neither should the caller.
+			static R DefaultInvoker(Args... /*args*/, const FunctorStorageType& /*functor*/)
+			{
+			#if EASTL_EXCEPTIONS_ENABLED
+				throw eastl::bad_function_call();
+			#else
+				EASTL_ASSERT_MSG(false, "function_detail call on an empty function_detail");
+			#endif
+			};
+			EA_RESTORE_VC_WARNING();
+			EA_RESTORE_CLANG_WARNING();
+			EA_RESTORE_GCC_WARNING();
+
+
+			ManagerFuncPtr mMgrFuncPtr = nullptr;
+			InvokeFuncPtr mInvokeFuncPtr = &DefaultInvoker;
+		};
+
+		template <typename>
+		struct extract_signature_from_callable;
+
+		#define EASTL_EXTRACT_SIGNATURE_PATTERN(QUALIFIERS)                                       \
+			template <typename ReturnType, typename KlassType, typename... Args>                  \
+			struct extract_signature_from_callable<ReturnType (KlassType::*)(Args...) QUALIFIERS> \
+			{                                                                                     \
+				using type = ReturnType(Args...);                                                 \
+			};
+
+		EASTL_GENERATE_MEMBER_FUNCTION_VARIANTS(EASTL_EXTRACT_SIGNATURE_PATTERN)
+
+		// Helper
+		template <typename Callable>
+		using extract_signature_from_callable_t = typename extract_signature_from_callable<Callable>::type;
+
+	} // namespace internal
+
+} // namespace eastl
+
+#endif // EASTL_FUNCTION_DETAIL_H
diff --git a/external/EASTL/include/EASTL/internal/function_help.h b/external/EASTL/include/EASTL/internal/function_help.h
new file mode 100644
index 00000000..04481d37
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/function_help.h
@@ -0,0 +1,51 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTERNAL_FUNCTION_HELP_H
+#define EASTL_INTERNAL_FUNCTION_HELP_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+	namespace internal
+	{
+
+		//////////////////////////////////////////////////////////////////////
+		// is_null
+		//
+		template <typename T>
+		bool is_null(const T&)
+		{
+			return false;
+		}
+
+		template <typename Result, typename... Arguments>
+		bool is_null(Result (*const& function_pointer)(Arguments...))
+		{
+			return function_pointer == nullptr;
+		}
+
+		template <typename Result, typename Class, typename... Arguments>
+		bool is_null(Result (Class::*const& function_pointer)(Arguments...))
+		{
+			return function_pointer == nullptr;
+		}
+
+		template <typename Result, typename Class, typename... Arguments>
+		bool is_null(Result (Class::*const& function_pointer)(Arguments...) const)
+		{
+			return function_pointer == nullptr;
+		}
+
+	} // namespace internal
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/external/EASTL/include/EASTL/internal/functional_base.h b/external/EASTL/include/EASTL/internal/functional_base.h
new file mode 100644
index 00000000..4fe1c3c1
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/functional_base.h
@@ -0,0 +1,427 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FUNCTIONAL_BASE_H
+#define EASTL_INTERNAL_FUNCTIONAL_BASE_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+
+namespace eastl
+{
+	// forward declaration for swap
+	template <typename T>
+	inline void swap(T& a, T& b)
+		EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value && eastl::is_nothrow_move_assignable<T>::value);
+
+
+	/// invoke
+	///
+	/// invoke is a generalized function-call operator which works on function pointers, member function
+	/// pointers, callable objects and member pointers.
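+	///
+	/// A short illustrative sketch (editorial example; Widget and its members are hypothetical):
+	///
+	///     struct Widget { int w, h; int area() const { return w * h; } };
+	///     Widget obj{3, 4};
+	///     eastl::invoke(&Widget::area, obj);              // member function pointer -> 12
+	///     eastl::invoke(&Widget::w, obj);                 // member data pointer     -> 3
+	///     eastl::invoke([](int n) { return n + 1; }, 41); // callable object         -> 42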
+ /// + /// For (member/non-member) function pointers and callable objects, it returns the result of calling + /// the function/object with the specified arguments. For member data pointers, it simply returns + /// the member. + /// + /// Note that there are also reference_wrapper specializations of invoke, which need to be defined + /// later since reference_wrapper uses invoke in its implementation. Those are defined immediately + /// after the definition of reference_wrapper. + /// + /// http://en.cppreference.com/w/cpp/utility/functional/invoke + /// + template + EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((eastl::forward(obj).*func)(eastl::forward(args)...))) + -> typename enable_if>::value, + decltype((eastl::forward(obj).*func)(eastl::forward(args)...))>::type + { + return (eastl::forward(obj).*func)(eastl::forward(args)...); + } + + template + EA_CONSTEXPR auto invoke_impl(F&& func, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward(func)(eastl::forward(args)...))) + -> decltype(eastl::forward(func)(eastl::forward(args)...)) + { + return eastl::forward(func)(eastl::forward(args)...); + } + + + template + EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(((*eastl::forward(obj)).*func)(eastl::forward(args)...))) + -> decltype(((*eastl::forward(obj)).*func)(eastl::forward(args)...)) + { + return ((*eastl::forward(obj)).*func)(eastl::forward(args)...); + } + + template + EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward(obj).*member)) + -> typename enable_if< + is_base_of>::value, + decltype(eastl::forward(obj).*member) + >::type + { + return eastl::forward(obj).*member; + } + + template + EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((*eastl::forward(obj)).*member)) + -> decltype((*eastl::forward(obj)).*member) + { + return (*eastl::forward(obj)).*member; + } + + template + EA_CONSTEXPR decltype(auto) invoke(F&& func, Args&&... 
args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(invoke_impl(eastl::forward(func), eastl::forward(args)...))) + { + return invoke_impl(eastl::forward(func), eastl::forward(args)...); + } + + template + struct invoke_result_impl { + }; + + template + struct invoke_result_impl(), eastl::declval()...))>, Args...> + { + typedef decltype(invoke_impl(eastl::declval(), eastl::declval()...)) type; + }; + + template + struct invoke_result : public invoke_result_impl {}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using invoke_result_t = typename invoke_result::type; + #endif + + template + struct is_invocable_impl : public eastl::false_type {}; + + template + struct is_invocable_impl::type>, Args...> : public eastl::true_type {}; + + template + struct is_invocable : public is_invocable_impl {}; + + template + struct is_invocable_r_impl : public eastl::false_type {}; + + template + struct is_invocable_r_impl::type>, Args...> + : public disjunction::type, R>, + is_same::type, void>> {}; + + template + struct is_invocable_r : public is_invocable_r_impl {}; + + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable::value; + + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r::value; + + template + struct is_nothrow_invocable_impl : public eastl::false_type {}; + + template + struct is_nothrow_invocable_impl::type>, Args...> + : public eastl::bool_constant(), eastl::declval()...))> {}; + + template + struct is_nothrow_invocable : public is_nothrow_invocable_impl {}; + + template + struct is_nothrow_invocable_r_impl : public eastl::false_type {}; + + template + struct is_nothrow_invocable_r_impl::type>, Args...> + { + static EA_CONSTEXPR_OR_CONST bool value = eastl::is_convertible::type, R>::value + && eastl::is_nothrow_invocable::value; + }; + + template + struct is_nothrow_invocable_r : public is_nothrow_invocable_r_impl {}; + + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_no_throw_invocable_v = is_nothrow_invocable::value; + + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_nothrow_invocable_r_v = is_nothrow_invocable_r::value; + + /// allocator_arg_t + /// + /// allocator_arg_t is an empty class type used to disambiguate the overloads of + /// constructors and member functions of allocator-aware objects, including tuple, + /// function, promise, and packaged_task. + /// http://en.cppreference.com/w/cpp/memory/allocator_arg_t + /// + struct allocator_arg_t + {}; + + + /// allocator_arg + /// + /// allocator_arg is a constant of type allocator_arg_t used to disambiguate, at call site, + /// the overloads of the constructors and member functions of allocator-aware objects, + /// such as tuple, function, promise, and packaged_task. 
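+	///
+	/// A hedged sketch of the disambiguation (editorial example; MyType and MyAllocator are hypothetical):
+	///
+	///     struct MyType {
+	///         MyType(int n);                                        // plain constructor
+	///         MyType(eastl::allocator_arg_t, MyAllocator a, int n); // allocator-aware constructor
+	///     };
+	///     MyType t(eastl::allocator_arg, myAllocator, 42); // the tag selects the allocator-aware overload
+	///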
+	/// http://en.cppreference.com/w/cpp/memory/allocator_arg
+	///
+	EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t();
+
+
+	template <typename Argument, typename Result>
+	struct EASTL_REMOVE_AT_2024_APRIL unary_function
+	{
+		EASTL_REMOVE_AT_2024_APRIL typedef Argument argument_type;
+		EASTL_REMOVE_AT_2024_APRIL typedef Result   result_type;
+	};
+
+
+	template <typename Argument1, typename Argument2, typename Result>
+	struct EASTL_REMOVE_AT_2024_APRIL binary_function
+	{
+		EASTL_REMOVE_AT_2024_APRIL typedef Argument1 first_argument_type;
+		EASTL_REMOVE_AT_2024_APRIL typedef Argument2 second_argument_type;
+		EASTL_REMOVE_AT_2024_APRIL typedef Result    result_type;
+	};
+
+
+	/// less
+	template <typename T = void>
+	struct less
+	{
+		EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+			{ return a < b; }
+	};
+
+	// http://en.cppreference.com/w/cpp/utility/functional/less_void
+	template <>
+	struct less<void>
+	{
+		template <typename A, typename B>
+		EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+			-> decltype(eastl::forward<A>(a) < eastl::forward<B>(b))
+			{ return eastl::forward<A>(a) < eastl::forward<B>(b); }
+	};
+
+
+	/// reference_wrapper
+	template <typename T>
+	class reference_wrapper
+	{
+	public:
+		typedef T type;
+
+		reference_wrapper(T&) EA_NOEXCEPT;
+		reference_wrapper(T&&) = delete;
+		reference_wrapper(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+		reference_wrapper& operator=(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+		operator T& () const EA_NOEXCEPT;
+		T& get() const EA_NOEXCEPT;
+
+		template <typename... ArgTypes>
+		typename eastl::invoke_result<T&, ArgTypes...>::type operator() (ArgTypes&&...) const;
+
+	private:
+		T* val;
+	};
+
+	template <typename T>
+	reference_wrapper<T>::reference_wrapper(T &v) EA_NOEXCEPT
+		: val(eastl::addressof(v))
+	{}
+
+	template <typename T>
+	reference_wrapper<T>::reference_wrapper(const reference_wrapper<T>& other) EA_NOEXCEPT
+		: val(other.val)
+	{}
+
+	template <typename T>
+	reference_wrapper<T>& reference_wrapper<T>::operator=(const reference_wrapper<T>& other) EA_NOEXCEPT
+	{
+		val = other.val;
+		return *this;
+	}
+
+	template <typename T>
+	reference_wrapper<T>::operator T&() const EA_NOEXCEPT
+	{
+		return *val;
+	}
+
+	template <typename T>
+	T& reference_wrapper<T>::get() const EA_NOEXCEPT
+	{
+		return *val;
+	}
+
+	template <typename T>
+	template <typename... ArgTypes>
+	typename eastl::invoke_result<T&, ArgTypes...>::type reference_wrapper<T>::operator() (ArgTypes&&... args) const
+	{
+		return eastl::invoke(*val, eastl::forward<ArgTypes>(args)...);
+	}
+
+	// reference_wrapper-specific utilities
+	template <typename T>
+	reference_wrapper<T> ref(T& t) EA_NOEXCEPT
+	{
+		return eastl::reference_wrapper<T>(t);
+	}
+
+	template <typename T>
+	void ref(const T&&) = delete;
+
+	template <typename T>
+	reference_wrapper<T> ref(reference_wrapper<T> t) EA_NOEXCEPT
+	{
+		return eastl::ref(t.get());
+	}
+
+	template <typename T>
+	reference_wrapper<const T> cref(const T& t) EA_NOEXCEPT
+	{
+		return eastl::reference_wrapper<const T>(t);
+	}
+
+	template <typename T>
+	void cref(const T&&) = delete;
+
+	template <typename T>
+	reference_wrapper<const T> cref(reference_wrapper<T> t) EA_NOEXCEPT
+	{
+		return eastl::cref(t.get());
+	}
+
+
+	// reference_wrapper-specific type traits
+	template <typename T>
+	struct is_reference_wrapper_helper
+		: public eastl::false_type {};
+
+	template <typename T>
+	struct is_reference_wrapper_helper<eastl::reference_wrapper<T> >
+		: public eastl::true_type {};
+
+	template <typename T>
+	struct is_reference_wrapper
+		: public eastl::is_reference_wrapper_helper<typename eastl::remove_cv<T>::type> {};
+
+
+	// Helper which adds a reference to a type when given a reference_wrapper of that type.
+	template <typename T>
+	struct remove_reference_wrapper
+		{ typedef T type; };
+
+	template <typename T>
+	struct remove_reference_wrapper< eastl::reference_wrapper<T> >
+		{ typedef T& type; };
+
+	template <typename T>
+	struct remove_reference_wrapper< const eastl::reference_wrapper<T> >
+		{ typedef T& type; };
+
+	// reference_wrapper specializations of invoke
+	// These have to come after reference_wrapper is defined, but reference_wrapper needs to have a
+	// definition of invoke, so these specializations need to come after everything else has been defined.
+	template <typename R, typename C, typename T, typename... Args>
+	EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((obj.get().*func)(eastl::forward<Args>(args)...)))
+		-> typename enable_if<is_reference_wrapper<eastl::decay_t<T>>::value,
+							  decltype((obj.get().*func)(eastl::forward<Args>(args)...))>::type
+	{
+		return (obj.get().*func)(eastl::forward<Args>(args)...);
+	}
+
+	template <typename M, typename C, typename T>
+	EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(obj.get().*member))
+		-> typename enable_if<is_reference_wrapper<eastl::decay_t<T>>::value,
+							  decltype(obj.get().*member)>::type
+	{
+		return obj.get().*member;
+	}
+
+
+	///////////////////////////////////////////////////////////////////////
+	// bind
+	///////////////////////////////////////////////////////////////////////
+
+	/// bind1st
+	///
+	EASTL_INTERNAL_DISABLE_DEPRECATED() // unary_function is deprecated
+	template <typename Operation>
+	class EASTL_REMOVE_AT_2024_APRIL binder1st : public unary_function<typename Operation::second_argument_type, typename Operation::result_type>
+	{
+	protected:
+		typename Operation::first_argument_type value;
+		Operation op;
+
+	public:
+		binder1st(const Operation& x, const typename Operation::first_argument_type& y)
+			: value(y), op(x) { }
+
+		typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const
+			{ return op(value, x); }
+
+		typename Operation::result_type operator()(typename Operation::second_argument_type& x) const
+			{ return op(value, x); }
+	};
+	EASTL_INTERNAL_RESTORE_DEPRECATED()
+
+
+	EASTL_INTERNAL_DISABLE_DEPRECATED() // 'eastl::binder1st': was declared deprecated
+	template <typename Operation, typename T>
+	EASTL_REMOVE_AT_2024_APRIL inline binder1st<Operation> bind1st(const Operation& op, const T& x)
+	{
+		typedef typename Operation::first_argument_type value;
+		return binder1st<Operation>(op, value(x));
+	}
+	EASTL_INTERNAL_RESTORE_DEPRECATED()
+
+	/// bind2nd
+	///
+	EASTL_INTERNAL_DISABLE_DEPRECATED() // unary_function is deprecated
+	template <typename Operation>
+	class EASTL_REMOVE_AT_2024_APRIL binder2nd : public unary_function<typename Operation::first_argument_type, typename Operation::result_type>
+	{
+	protected:
+		Operation op;
+		typename Operation::second_argument_type value;
+
+	public:
+		binder2nd(const Operation& x, const typename Operation::second_argument_type& y)
+			: op(x), value(y) { }
+
+		typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const
+			{ return op(x, value); }
+
+		typename Operation::result_type operator()(typename Operation::first_argument_type& x) const
+			{ return op(x, value); }
+	};
+	EASTL_INTERNAL_RESTORE_DEPRECATED()
+
+
+	EASTL_INTERNAL_DISABLE_DEPRECATED() // 'eastl::binder2nd': was declared deprecated
+	template <typename Operation, typename T>
+	EASTL_REMOVE_AT_2024_APRIL inline binder2nd<Operation> bind2nd(const Operation& op, const T& x)
+	{
+		typedef typename Operation::second_argument_type value;
+		return binder2nd<Operation>(op, value(x));
+	}
+	EASTL_INTERNAL_RESTORE_DEPRECATED()
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_FUNCTIONAL_BASE_H
diff --git a/external/EASTL/include/EASTL/internal/generic_iterator.h b/external/EASTL/include/EASTL/internal/generic_iterator.h
new file mode 100644
index 00000000..5d894cbe
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/generic_iterator.h
@@ -0,0
+1,221 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Implements a generic iterator from a given iteratable type, such as a pointer. +// We cannot put this file into our own iterator.h file because we need to +// still be able to use this file when we have our iterator.h disabled. +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_GENERIC_ITERATOR_H +#define EASTL_INTERNAL_GENERIC_ITERATOR_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include + +// There is no warning number 'number'. +// Member template functions cannot be used for copy-assignment or copy-construction. +EA_DISABLE_VC_WARNING(4619 4217); + + +namespace eastl +{ + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'generic_iterator': was declared deprecated + + /// generic_iterator + /// + /// Converts something which can be iterated into a formal iterator. + /// While this class' primary purpose is to allow the conversion of + /// a pointer to an iterator, you can convert anything else to an + /// iterator by defining an iterator_traits<> specialization for that + /// object type. See EASTL iterator.h for this. + /// + /// Example usage: + /// typedef generic_iterator IntArrayIterator; + /// typedef generic_iterator IntArrayIteratorOther; + /// + template + class EASTL_REMOVE_AT_2024_SEPT generic_iterator + { + protected: + Iterator mIterator; + + public: + typedef typename eastl::iterator_traits::iterator_category iterator_category; + typedef typename eastl::iterator_traits::value_type value_type; + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::reference reference; + typedef typename eastl::iterator_traits::pointer pointer; + typedef Iterator iterator_type; + typedef Container container_type; + typedef generic_iterator this_type; + + generic_iterator() + : mIterator(iterator_type()) { } + + explicit generic_iterator(const iterator_type& x) + : mIterator(x) { } + + this_type& operator=(const iterator_type& x) + { mIterator = x; return *this; } + + template + generic_iterator(const generic_iterator& x) + : mIterator(x.base()) { } + + reference operator*() const + { return *mIterator; } + + pointer operator->() const + { return mIterator; } + + this_type& operator++() + { ++mIterator; return *this; } + + this_type operator++(int) + { return this_type(mIterator++); } + + this_type& operator--() + { --mIterator; return *this; } + + this_type operator--(int) + { return this_type(mIterator--); } + + reference operator[](const difference_type& n) const + { return mIterator[n]; } + + this_type& operator+=(const difference_type& n) + { mIterator += n; return *this; } + + this_type operator+(const difference_type& n) const + { return this_type(mIterator + n); } + + this_type& operator-=(const difference_type& n) + { mIterator -= n; return *this; } + + this_type operator-(const difference_type& n) const + { return this_type(mIterator - n); } + + const iterator_type& base() const + { return mIterator; } + + private: + // Unwrapping interface, not part of the public API. + const iterator_type& unwrap() const + { return mIterator; } + + // The unwrapper helpers need access to unwrap(). 
+ friend is_iterator_wrapper_helper; + friend is_iterator_wrapper; + + }; // class generic_iterator + + + template + inline bool operator==(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() == rhs.base(); } + + template + inline bool operator==(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() == rhs.base(); } + + template + inline bool operator!=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() != rhs.base(); } + + template + inline bool operator!=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() != rhs.base(); } + + template + inline bool operator<(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() < rhs.base(); } + + template + inline bool operator<(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() < rhs.base(); } + + template + inline bool operator>(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() > rhs.base(); } + + template + inline bool operator>(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() > rhs.base(); } + + template + inline bool operator<=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() <= rhs.base(); } + + template + inline bool operator<=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() <= rhs.base(); } + + template + inline bool operator>=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() >= rhs.base(); } + + template + inline bool operator>=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() >= rhs.base(); } + + template + inline typename generic_iterator::difference_type + operator-(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() - rhs.base(); } + + template + inline generic_iterator + operator+(typename generic_iterator::difference_type n, const generic_iterator& x) + { return generic_iterator(x.base() + n); } + + + + /// is_generic_iterator + /// + /// Tells if an iterator is one of these generic_iterators. This is useful if you want to + /// write code that uses miscellaneous iterators but wants to tell if they are generic_iterators. + /// A primary reason to do so is that you can get at the pointer within the generic_iterator. + /// + template + struct EASTL_REMOVE_AT_2024_SEPT is_generic_iterator : public false_type { }; + + template + struct EASTL_REMOVE_AT_2024_SEPT is_generic_iterator > : public true_type { }; + + + /// unwrap_generic_iterator + /// + /// Returns `it.base()` if it's a generic_iterator, else returns `it` as-is. + /// + /// Example usage: + /// vector intVector; + /// eastl::generic_iterator::iterator> genericIterator(intVector.begin()); + /// vector::iterator it = unwrap_generic_iterator(genericIterator); + /// + template + EASTL_REMOVE_AT_2024_SEPT inline typename eastl::is_iterator_wrapper_helper::value>::iterator_type unwrap_generic_iterator(Iterator it) + { + // get_unwrapped(it) -> it.unwrap() which is equivalent to `it.base()` for generic_iterator and to `it` otherwise. 
+ return eastl::is_iterator_wrapper_helper::value>::get_unwrapped(it); + } + + EASTL_INTERNAL_RESTORE_DEPRECATED() + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/hashtable.h b/external/EASTL/include/EASTL/internal/hashtable.h new file mode 100644 index 00000000..0e121462 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/hashtable.h @@ -0,0 +1,3069 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hashtable, much like the C++11 unordered_set/unordered_map. +// proposed classes. +// The primary distinctions between this hashtable and C++11 unordered containers are: +// - hashtable is savvy to an environment that doesn't have exception handling, +// as is sometimes the case with console or embedded environments. +// - hashtable is slightly more space-efficient than a conventional std hashtable +// implementation on platforms with 64 bit size_t. This is +// because std STL uses size_t (64 bits) in data structures whereby 32 bits +// of data would be fine. +// - hashtable can contain objects with alignment requirements. TR1 hash tables +// cannot do so without a bit of tedious non-portable effort. +// - hashtable supports debug memory naming natively. +// - hashtable provides a find function that lets you specify a type that is +// different from the hash table key type. This is particularly useful for +// the storing of string objects but finding them by char pointers. +// - hashtable provides a lower level insert function which lets the caller +// specify the hash code and optionally the node instance. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_HASHTABLE_H +#define EASTL_INTERNAL_HASHTABLE_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() + #include + #include +EA_RESTORE_ALL_VC_WARNINGS() + +// 4512/4626 - 'class' : assignment operator could not be generated. +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +EA_DISABLE_VC_WARNING(4512 4626 4530 4571); + + +namespace eastl +{ + + /// EASTL_HASHTABLE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASHTABLE_DEFAULT_NAME + #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable". + #endif + + + /// EASTL_HASHTABLE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR + #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME) + #endif + + + /// kHashtableAllocFlagBuckets + /// Flag to allocator which indicates that we are allocating buckets and not nodes. + enum { kHashtableAllocFlagBuckets = 0x00400000 }; + + + /// gpEmptyBucketArray + /// + /// A shared representation of an empty hash table. This is present so that + /// a new empty hashtable allocates no memory. 
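+	/// For example (editorial illustration), a default-constructed container such as
+	///     eastl::hash_set<int> emptySet; // bucket pointer refers to gpEmptyBucketArray
+	/// performs no heap allocation until the first insert builds a real bucket array.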
It has two entries, one for + /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel. + /// + extern EASTL_API void* gpEmptyBucketArray[2]; + + + /// EASTL_MACRO_SWAP + /// + /// Use EASTL_MACRO_SWAP because GCC (at least v4.6-4.8) has a bug where it fails to compile eastl::swap(mpBucketArray, x.mpBucketArray). + /// + #define EASTL_MACRO_SWAP(Type, a, b) \ + { Type temp = a; a = b; b = temp; } + + + /// hash_node + /// + /// A hash_node stores an element in a hash table, much like a + /// linked list node stores an element in a linked list. + /// A hash_node additionally can, via template parameter, + /// store a hash code in the node to speed up hash calculations + /// and comparisons in some cases. + /// + template + struct hash_node; + + EA_DISABLE_VC_WARNING(4625 4626) // "copy constructor / assignment operator could not be generated because a base class copy constructor is inaccessible or deleted" + #ifdef EA_COMPILER_MSVC_2015 + EA_DISABLE_VC_WARNING(5026) // disable warning: "move constructor was implicitly defined as deleted" + #endif + template + struct hash_node + { + hash_node() = default; + hash_node(const hash_node&) = default; + hash_node(hash_node&&) = default; + + Value mValue; + hash_node* mpNext; + eastl_size_t mnHashCode; // See config.h for the definition of eastl_size_t, which defaults to size_t. + } EASTL_MAY_ALIAS; + + template + struct hash_node + { + hash_node() = default; + hash_node(const hash_node&) = default; + hash_node(hash_node&&) = default; + + Value mValue; + hash_node* mpNext; + } EASTL_MAY_ALIAS; + + #ifdef EA_COMPILER_MSVC_2015 + EA_RESTORE_VC_WARNING() + #endif + EA_RESTORE_VC_WARNING() + + + // has_hashcode_member + // + // Custom type-trait that checks for the existence of a class data member 'mnHashCode'. + // + // In order to explicitly instantiate the hashtable without error we need to SFINAE away the functions that will + // fail to compile based on if the 'hash_node' contains a 'mnHashCode' member dictated by the hashtable template + // parameters. The hashtable support this level of configuration to allow users to choose which between the space vs. + // time optimization. + // + namespace Internal + { + template + struct has_hashcode_member + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(decltype(U::mnHashCode)* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + } + + static_assert(Internal::has_hashcode_member>::value, "contains a mnHashCode member"); + static_assert(!Internal::has_hashcode_member>::value, "doesn't contain a mnHashCode member"); + + // convenience macros to increase the readability of the code paths that must SFINAE on if the 'hash_node' + // contains the cached hashed value or not. + #define ENABLE_IF_HAS_HASHCODE(T, RT) typename eastl::enable_if::value, RT>::type* + #define ENABLE_IF_HASHCODE_EASTLSIZET(T, RT) typename eastl::enable_if::value, RT>::type + #define ENABLE_IF_TRUETYPE(T) typename eastl::enable_if::type* + #define DISABLE_IF_TRUETYPE(T) typename eastl::enable_if::type* + + + /// node_iterator_base + /// + /// Node iterators iterate nodes within a given bucket. + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. 
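+	///
+	/// An illustrative sketch of bucket-local iteration (editorial example; 'table' is hypothetical
+	/// and the matching end(n) overload is assumed from the bucket interface further below):
+	///
+	///     for(auto it = table.begin(nBucket); it != table.end(nBucket); ++it)
+	///         ++count; // visits only the nodes chained into bucket nBucket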
+ /// + template + struct node_iterator_base + { + typedef hash_node node_type; + + node_type* mpNode; + + node_iterator_base(node_type* pNode) + : mpNode(pNode) { } + + void increment() + { mpNode = mpNode->mpNext; } + }; + + + + /// node_iterator + /// + /// Node iterators iterate nodes within a given bucket. + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. + /// + template + struct node_iterator : public node_iterator_base + { + public: + typedef node_iterator_base base_type; + typedef node_iterator this_type; + typedef typename base_type::node_type node_type; + typedef Value value_type; + typedef typename conditional::type pointer; + typedef typename conditional::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + explicit node_iterator(node_type* pNode = NULL) + : base_type(pNode) { } + + node_iterator(const node_iterator& x) + : base_type(x.mpNode) { } + + reference operator*() const + { return base_type::mpNode->mValue; } + + pointer operator->() const + { return &(base_type::mpNode->mValue); } + + node_iterator& operator++() + { base_type::increment(); return *this; } + + node_iterator operator++(int) + { node_iterator temp(*this); base_type::increment(); return temp; } + + }; // node_iterator + + + + /// hashtable_iterator_base + /// + /// A hashtable_iterator iterates the entire hash table and not just + /// nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. + /// + template + struct hashtable_iterator_base + { + public: + typedef hashtable_iterator_base this_type; + typedef hash_node node_type; + + protected: + template + friend class hashtable; + + template + friend struct hashtable_iterator; + + template + friend bool operator==(const hashtable_iterator_base&, const hashtable_iterator_base&); + + template + friend bool operator!=(const hashtable_iterator_base&, const hashtable_iterator_base&); + + node_type* mpNode; // Current node within current bucket. + node_type** mpBucket; // Current bucket. + + public: + hashtable_iterator_base(node_type* pNode, node_type** pBucket) + : mpNode(pNode), mpBucket(pBucket) { } + + void increment_bucket() + { + ++mpBucket; + while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end + ++mpBucket; // of the bucket array so that finding the end of the bucket + mpNode = *mpBucket; // array is quick and simple. + } + + void increment() + { + mpNode = mpNode->mpNext; + + while(mpNode == NULL) + mpNode = *++mpBucket; + } + + }; // hashtable_iterator_base + + + + + /// hashtable_iterator + /// + /// A hashtable_iterator iterates the entire hash table and not just + /// nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. 
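+	///
+	/// Sketch of whole-table traversal (editorial example; 'table' and Process are hypothetical):
+	///
+	///     for(auto it = table.begin(); it != table.end(); ++it)
+	///         Process(*it); // walks every bucket in turn, skipping empty ones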
+ /// + template + struct hashtable_iterator : public hashtable_iterator_base + { + public: + typedef hashtable_iterator_base base_type; + typedef hashtable_iterator this_type; + typedef hashtable_iterator this_type_non_const; + typedef typename base_type::node_type node_type; + typedef Value value_type; + typedef typename conditional::type pointer; + typedef typename conditional::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL) + : base_type(pNode, pBucket) { } + + hashtable_iterator(node_type** pBucket) + : base_type(*pBucket, pBucket) { } + + template ::type = 0> + hashtable_iterator(const this_type_non_const& x) + : base_type(x.mpNode, x.mpBucket) { } + + hashtable_iterator(const hashtable_iterator&) = default; + hashtable_iterator(hashtable_iterator&&) = default; + hashtable_iterator& operator=(const hashtable_iterator&) = default; + hashtable_iterator& operator=(hashtable_iterator&&) = default; + + reference operator*() const + { return base_type::mpNode->mValue; } + + pointer operator->() const + { return &(base_type::mpNode->mValue); } + + hashtable_iterator& operator++() + { base_type::increment(); return *this; } + + hashtable_iterator operator++(int) + { hashtable_iterator temp(*this); base_type::increment(); return temp; } + + const node_type* get_node() const + { return base_type::mpNode; } + + }; // hashtable_iterator + + + + + /// ht_distance + /// + /// This function returns the same thing as distance() for + /// forward iterators but returns zero for input iterators. + /// The reason why is that input iterators can only be read + /// once, and calling distance() on an input iterator destroys + /// the ability to read it. This ht_distance is used only for + /// optimization and so the code will merely work better with + /// forward iterators that input iterators. + /// + template + inline typename eastl::iterator_traits::difference_type + distance_fw_impl(Iterator /*first*/, Iterator /*last*/, EASTL_ITC_NS::input_iterator_tag) + { + return 0; + } + + template + inline typename eastl::iterator_traits::difference_type + distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag) + { return eastl::distance(first, last); } + + template + inline typename eastl::iterator_traits::difference_type + ht_distance(Iterator first, Iterator last) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return distance_fw_impl(first, last, IC()); + } + + + + + /// mod_range_hashing + /// + /// Implements the algorithm for conversion of a number in the range of + /// [0, SIZE_T_MAX] to the range of [0, BucketCount). + /// + struct mod_range_hashing + { + uint32_t operator()(size_t r, uint32_t n) const + { return r % n; } + }; + + + /// default_ranged_hash + /// + /// Default ranged hash function H. In principle it should be a + /// function object composed from objects of type H1 and H2 such that + /// h(k, n) = h2(h1(k), n), but that would mean making extra copies of + /// h1 and h2. So instead we'll just use a tag to tell class template + /// hashtable to do that composition. + /// + struct default_ranged_hash{ }; + + + /// prime_rehash_policy + /// + /// Default value for rehash policy. Bucket size is (usually) the + /// smallest prime that keeps the load factor small enough. 
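+	///
+	/// A worked illustration (editorial; numbers approximate): with mfMaxLoadFactor = 1.f and a
+	/// bucket count of 11 holding 11 elements, inserting one more element would push the load
+	/// factor to 12/11 > 1.f, so GetRehashRequired(11, 11, 1) reports that a rehash is needed
+	/// and proposes a larger prime bucket count (growth also scales by mfGrowthFactor, default 2.f).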
+ /// + struct EASTL_API prime_rehash_policy + { + public: + float mfMaxLoadFactor; + float mfGrowthFactor; + mutable uint32_t mnNextResize; + + public: + prime_rehash_policy(float fMaxLoadFactor = 1.f) + : mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { } + + float GetMaxLoadFactor() const + { return mfMaxLoadFactor; } + + /// Return a bucket count no greater than nBucketCountHint, + /// Don't update member variables while at it. + static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint); + + /// Return a bucket count no greater than nBucketCountHint. + /// This function has a side effect of updating mnNextResize. + uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const; + + /// Return a bucket count no smaller than nBucketCountHint. + /// This function has a side effect of updating mnNextResize. + uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const; + + /// Return a bucket count appropriate for nElementCount elements. + /// This function has a side effect of updating mnNextResize. + uint32_t GetBucketCount(uint32_t nElementCount) const; + + /// nBucketCount is current bucket count, nElementCount is current element count, + /// and nElementAdd is number of elements to be inserted. Do we need + /// to increase bucket count? If so, return pair(true, n), where + /// n is the new bucket count. If not, return pair(false, 0). + eastl::pair + GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const; + }; + + + + + + /////////////////////////////////////////////////////////////////////// + // Base classes for hashtable. We define these base classes because + // in some cases we want to do different things depending on the + // value of a policy class. In some cases the policy class affects + // which member functions and nested typedefs are defined; we handle that + // by specializing base class templates. Several of the base class templates + // need to access other members of class template hashtable, so we use + // the "curiously recurring template pattern" (parent class is templated + // on type of child class) for them. + /////////////////////////////////////////////////////////////////////// + + + /// rehash_base + /// + /// Give hashtable the get_max_load_factor functions if the rehash + /// policy is prime_rehash_policy. + /// + template + struct rehash_base { }; + + template + struct rehash_base + { + // Returns the max load factor, which is the load factor beyond + // which we rebuild the container with a new bucket count. + float get_max_load_factor() const + { + const Hashtable* const pThis = static_cast(this); + return pThis->rehash_policy().GetMaxLoadFactor(); + } + + // If you want to make the hashtable never rehash (resize), + // set the max load factor to be a very high number (e.g. 100000.f). + void set_max_load_factor(float fMaxLoadFactor) + { + Hashtable* const pThis = static_cast(this); + pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor)); + } + }; + + + + + /// hash_code_base + /// + /// Encapsulates two policy issues that aren't quite orthogonal. + /// (1) The difference between using a ranged hash function and using + /// the combination of a hash function and a range-hashing function. + /// In the former case we don't have such things as hash codes, so + /// we have a dummy type as placeholder. + /// (2) Whether or not we cache hash codes. Caching hash codes is + /// meaningless if we have a ranged hash function. 
This is because + /// a ranged hash function converts an object directly to its + /// bucket index without ostensibly using a hash code. + /// We also put the key extraction and equality comparison function + /// objects here, for convenience. + /// + template + struct hash_code_base; + + + /// hash_code_base + /// + /// Specialization: ranged hash function, no caching hash codes. + /// H1 and H2 are provided but ignored. We define a dummy hash code type. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; // To do: Make this member go away entirely, as it never has any data. + Equal mEqual; // To do: Make this instance use zero space when it is zero size. + H mRangedHash; // To do: Make this instance use zero space when it is zero size + + public: + H1 hash_function() const + { return H1(); } + + EASTL_REMOVE_AT_2024_APRIL Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. + + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef void* hash_code_t; + typedef uint32_t bucket_index_t; + + hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h) + : mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { } + + hash_code_t get_hash_code(const Key& key) const + { + EA_UNUSED(key); + return NULL; + } + + bucket_index_t bucket_index(hash_code_t, uint32_t) const + { return (bucket_index_t)0; } + + bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const + { return (bucket_index_t)mRangedHash(key, nBucketCount); } + + bucket_index_t bucket_index(const hash_node* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); } + + bool compare(const Key& key, hash_code_t, hash_node* pNode) const + { return mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(hash_node*, const hash_node*) const + { } // Nothing to do. + + void set_code(hash_node* pDest, hash_code_t c) const + { + EA_UNUSED(pDest); + EA_UNUSED(c); + } + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(mRangedHash, x.mRangedHash); + } + + }; // hash_code_base + + + + // No specialization for ranged hash function while caching hash codes. + // That combination is meaningless, and trying to do it is an error. + + + /// hash_code_base + /// + /// Specialization: ranged hash function, cache hash codes. + /// This combination is meaningless, so we provide only a declaration + /// and no definition. + /// + template + struct hash_code_base; + + + + /// hash_code_base + /// + /// Specialization: hash function and range-hashing function, + /// no caching of hash codes. H is provided but ignored. + /// Provides typedef and accessor required by TR1. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; + Equal mEqual; + H1 m_h1; + H2 m_h2; + + public: + typedef H1 hasher; + + H1 hash_function() const + { return m_h1; } + + EASTL_REMOVE_AT_2024_APRIL Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. 
+ + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef size_t hash_code_t; + typedef uint32_t bucket_index_t; + typedef hash_node node_type; + + hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&) + : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { } + + hash_code_t get_hash_code(const Key& key) const + { return (hash_code_t)m_h1(key); } + + bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); } + + bool compare(const Key& key, hash_code_t, node_type* pNode) const + { return mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(node_type*, const node_type*) const + { } // Nothing to do. + + void set_code(node_type*, hash_code_t) const + { } // Nothing to do. + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(m_h1, x.m_h1); + eastl::swap(m_h2, x.m_h2); + } + + }; // hash_code_base + + + + /// hash_code_base + /// + /// Specialization: hash function and range-hashing function, + /// caching hash codes. H is provided but ignored. + /// Provides typedef and accessor required by TR1. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; + Equal mEqual; + H1 m_h1; + H2 m_h2; + + public: + typedef H1 hasher; + + H1 hash_function() const + { return m_h1; } + + EASTL_REMOVE_AT_2024_APRIL Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. 
+ + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef uint32_t hash_code_t; + typedef uint32_t bucket_index_t; + typedef hash_node node_type; + + hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&) + : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { } + + hash_code_t get_hash_code(const Key& key) const + { return (hash_code_t)m_h1(key); } + + bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); } + + bool compare(const Key& key, hash_code_t c, node_type* pNode) const + { return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(node_type* pDest, const node_type* pSource) const + { pDest->mnHashCode = pSource->mnHashCode; } + + void set_code(node_type* pDest, hash_code_t c) const + { pDest->mnHashCode = c; } + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(m_h1, x.m_h1); + eastl::swap(m_h2, x.m_h2); + } + + }; // hash_code_base + + + + + + /////////////////////////////////////////////////////////////////////////// + /// hashtable + /// + /// Key and Value: arbitrary CopyConstructible types. + /// + /// ExtractKey: function object that takes a object of type Value + /// and returns a value of type Key. + /// + /// Equal: function object that takes two objects of type k and returns + /// a bool-like value that is true if the two objects are considered equal. + /// + /// H1: a hash function. A unary function object with argument type + /// Key and result type size_t. Return values should be distributed + /// over the entire range [0, numeric_limits::max()]. + /// + /// H2: a range-hashing function (in the terminology of Tavori and + /// Dreizin). This is a function which takes the output of H1 and + /// converts it to the range of [0, n]. Usually it merely takes the + /// output of H1 and mods it to n. + /// + /// H: a ranged hash function (Tavori and Dreizin). This is merely + /// a class that combines the functionality of H1 and H2 together, + /// possibly in some way that is somehow improved over H1 and H2 + /// It is a binary function whose argument types are Key and size_t + /// and whose result type is uint32_t. Given arguments k and n, the + /// return value is in the range [0, n). Default: h(k, n) = h2(h1(k), n). + /// If H is anything other than the default, H1 and H2 are ignored, + /// as H is thus overriding H1 and H2. + /// + /// RehashPolicy: Policy class with three members, all of which govern + /// the bucket count. nBucket(n) returns a bucket count no smaller + /// than n. GetBucketCount(n) returns a bucket count appropriate + /// for an element count of n. GetRehashRequired(nBucketCount, nElementCount, nElementAdd) + /// determines whether, if the current bucket count is nBucket and the + /// current element count is nElementCount, we need to increase the bucket + /// count. If so, returns pair(true, n), where n is the new + /// bucket count. If not, returns pair(false, ). + /// + /// Currently it is hard-wired that the number of buckets never + /// shrinks. 
Should we allow RehashPolicy to change that? + /// + /// bCacheHashCode: true if we store the value of the hash + /// function along with the value. This is a time-space tradeoff. + /// Storing it may improve lookup speed by reducing the number of + /// times we need to call the Equal function. + /// + /// bMutableIterators: true if hashtable::iterator is a mutable + /// iterator, false if iterator and const_iterator are both const + /// iterators. This is true for hash_map and hash_multimap, + /// false for hash_set and hash_multiset. + /// + /// bUniqueKeys: true if the return value of hashtable::count(k) + /// is always at most one, false if it may be an arbitrary number. + /// This is true for hash_set and hash_map and is false for + /// hash_multiset and hash_multimap. + /// + /////////////////////////////////////////////////////////////////////// + /// Note: + /// If you want to make a hashtable never increase its bucket usage, + /// call set_max_load_factor with a very high value such as 100000.f. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. See the find_as function + /// for more documentation on this. + /// + /// find_by_hash + /// In the interest of supporting fast operations wherever possible, + /// we provide a find_by_hash function which finds a node using its + /// hash code. This is useful for cases where the node's hash is + /// already known, allowing us to avoid a redundant hash operation + /// in the normal find path. + /// + template + class hashtable + : public rehash_base >, + public hash_code_base + { + public: + typedef Key key_type; + typedef Value value_type; + typedef typename ExtractKey::result_type mapped_type; + typedef hash_code_base hash_code_base_type; + typedef typename hash_code_base_type::hash_code_t hash_code_t; + typedef Allocator allocator_type; + typedef Equal key_equal; + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef value_type& reference; + typedef const value_type& const_reference; + typedef node_iterator local_iterator; + typedef node_iterator const_local_iterator; + typedef hashtable_iterator iterator; + typedef hashtable_iterator const_iterator; + typedef hash_node node_type; + typedef typename conditional, iterator>::type insert_return_type; + typedef hashtable this_type; + typedef RehashPolicy rehash_policy_type; + typedef ExtractKey extract_key_type; + typedef H1 h1_type; + typedef H2 h2_type; + typedef H h_type; + typedef integral_constant has_unique_keys_type; + + using hash_code_base_type::key_eq; + using hash_code_base_type::hash_function; + using hash_code_base_type::mExtractKey; + using hash_code_base_type::get_hash_code; + using hash_code_base_type::bucket_index; + using hash_code_base_type::compare; + using hash_code_base_type::set_code; + using hash_code_base_type::copy_code; + + static const bool kCacheHashCode = bCacheHashCode; + + enum + { + // This enumeration is deprecated in favor of eastl::kHashtableAllocFlagBuckets. + kAllocFlagBuckets EASTL_REMOVE_AT_2024_APRIL = eastl::kHashtableAllocFlagBuckets // Flag to allocator which indicates that we are allocating buckets and not nodes. 
+ }; + + protected: + node_type** mpBucketArray; + size_type mnBucketCount; + size_type mnElementCount; + RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away. + allocator_type mAllocator; // To do: Use base class optimization to make this go away. + + struct NodeFindKeyData { + node_type* node; + hash_code_t code; + size_type bucket_index; + }; + + public: + hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&, + const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + // note: standard only requires InputIterator. + template + hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount, + const H1&, const H2&, const H&, const Equal&, const ExtractKey&, + const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + hashtable(const hashtable& x); + + // initializer_list ctor support is implemented in subclasses (e.g. hash_set). + // hashtable(initializer_list, size_type nBucketCount, const H1&, const H2&, const H&, + // const Equal&, const ExtractKey&, const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + hashtable(this_type&& x); + hashtable(this_type&& x, const allocator_type& allocator); + ~hashtable(); + + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT + { + iterator i(mpBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator begin() const EA_NOEXCEPT + { + const_iterator i(mpBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator cbegin() const EA_NOEXCEPT + { return begin(); } + + iterator end() EA_NOEXCEPT + { return iterator(mpBucketArray + mnBucketCount); } + + const_iterator end() const EA_NOEXCEPT + { return const_iterator(mpBucketArray + mnBucketCount); } + + const_iterator cend() const EA_NOEXCEPT + { return const_iterator(mpBucketArray + mnBucketCount); } + + // Returns an iterator to the first item in bucket n. + local_iterator begin(size_type n) EA_NOEXCEPT + { return local_iterator(mpBucketArray[n]); } + + const_local_iterator begin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mpBucketArray[n]); } + + const_local_iterator cbegin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mpBucketArray[n]); } + + // Returns an iterator to the last item in a bucket returned by begin(n). 
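+ // (More precisely, end(n) below returns a past-the-end local_iterator - a null
+ // node - for bucket n; it is not dereferenceable.) A minimal sketch of walking
+ // one bucket, assuming a hypothetical table 'ht' and bucket index n:
+ //     for(auto it = ht.begin(n); it != ht.end(n); ++it)
+ //         use(*it); // 'use' is illustrative only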
+ local_iterator end(size_type) EA_NOEXCEPT + { return local_iterator(NULL); } + + const_local_iterator end(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + const_local_iterator cend(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + bool empty() const EA_NOEXCEPT + { return mnElementCount == 0; } + + size_type size() const EA_NOEXCEPT + { return mnElementCount; } + + // size_type max_size() const EA_NOEXCEPT; + + size_type bucket_count() const EA_NOEXCEPT + { return mnBucketCount; } + + // size_type max_bucket_count() const; + + size_type bucket_size(size_type n) const EA_NOEXCEPT + { return (size_type)eastl::distance(begin(n), end(n)); } + + //size_type bucket(const key_type& k) const EA_NOEXCEPT + // { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); } + + // Returns the ratio of element count to bucket count. A return value of 1 means + // there's an optimal 1 bucket for each element. + float load_factor() const EA_NOEXCEPT + { return (float)mnElementCount / (float)mnBucketCount; } + + // float max_load_factor() const; + // void max_load_factor( float ml ); + + // Inherited from the base class. + // Returns the max load factor, which is the load factor beyond + // which we rebuild the container with a new bucket count. + // get_max_load_factor comes from rehash_base. + // float get_max_load_factor() const; + + // Inherited from the base class. + // If you want to make the hashtable never rehash (resize), + // set the max load factor to be a very high number (e.g. 100000.f). + // set_max_load_factor comes from rehash_base. + // void set_max_load_factor(float fMaxLoadFactor); + + /// Generalization of get_max_load_factor. This is an extension that's + /// not present in C++ hash tables (unordered containers). + const rehash_policy_type& rehash_policy() const EA_NOEXCEPT + { return mRehashPolicy; } + + /// Generalization of set_max_load_factor. This is an extension that's + /// not present in C++ hash tables (unordered containers). + void rehash_policy(const rehash_policy_type& rehashPolicy); + + template + insert_return_type emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position, Args&&... args); + + insert_return_type insert(const value_type& value); + insert_return_type insert(value_type&& otherValue); + // template + // insert_return_type insert(P&& value); // sfinae: is_constructible::value + iterator insert(const_iterator hint, const value_type& value); + iterator insert(const_iterator hint, value_type&& value); + // template + // insert_return_type insert(const_iterator hint, P&& value); // sfinae: is_constructible::value + void insert(std::initializer_list ilist); + template void insert(InputIterator first, InputIterator last); + //insert_return_type insert(node_type&& nh); + //iterator insert(const_iterator hint, node_type&& nh); + + // This overload attempts to mitigate the overhead associated with mismatched cv-quality elements of + // the hashtable pair. It can avoid copy overhead because it will perfect forward the user provided pair types + // until it can constructed in-place in the allocated hashtable node. + // + // Ideally we would remove this overload as it deprecated and removed in C++17 but it currently causes + // performance regressions for hashtables with complex keys (keys that allocate resources). + EASTL_INTERNAL_DISABLE_DEPRECATED() // 'is_literal_type_v
<P>': was declared deprecated
+ template <class P,
+ class = typename eastl::enable_if_t<
+ #if EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ !eastl::is_same_v<eastl::decay_t<P>, key_type> &&
+ #endif
+ !eastl::is_literal_type_v<P>
&& + eastl::is_constructible_v>> + insert_return_type insert(P&& otherValue); + EASTL_INTERNAL_RESTORE_DEPRECATED() + + // Non-standard extension + template // See comments below for the const value_type& equivalent to this function. + insert_return_type insert(hash_code_t c, node_type* pNodeNew, P&& otherValue); + + // We provide a version of insert which lets the caller directly specify the hash value and + // a potential node to insert if needed. This allows for less thread contention in the case + // of a thread-shared hash table that's accessed during a mutex lock, because the hash calculation + // and node creation is done outside of the lock. If pNodeNew is supplied by the user (i.e. non-NULL) + // then it must be freeable via the hash table's allocator. If the return value is true then this function + // took over ownership of pNodeNew, else pNodeNew is still owned by the caller to free or to pass + // to another call to insert. pNodeNew need not be assigned the value by the caller, as the insert + // function will assign value to pNodeNew upon insertion into the hash table. pNodeNew may be + // created by the user with the allocate_uninitialized_node function, and freed by the free_uninitialized_node function. + insert_return_type insert(hash_code_t c, node_type* pNodeNew, const value_type& value); + + template eastl::pair insert_or_assign(const key_type& k, M&& obj); + template eastl::pair insert_or_assign(key_type&& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj); + + // Used to allocate and free memory used by insert(const value_type& value, hash_code_t c, node_type* pNodeNew). + node_type* allocate_uninitialized_node(); + void free_uninitialized_node(node_type* pNode); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + size_type erase(const key_type& k); + + void clear(); + void clear(bool clearBuckets); // If clearBuckets is true, we free the bucket memory and set the bucket count back to the newly constructed count. + void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + void rehash(size_type nBucketCount); + void reserve(size_type nElementCount); + + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + // missing transparent key support: + // template + // iterator find(const K& key); + // template + // const_iterator find(const K& key) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the hashtable value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. + /// + /// Example usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// hashSet.find_as("hello"); // Use default hash and compare. 
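+ /// (Note: as implemented further below, this default overload hashes with
+ /// eastl::hash<U> and assumes the default mod range-hashing policy when
+ /// selecting the bucket.)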
+ /// + /// Example usage (note that the predicate uses string as first type and char* as second): + /// hash_set hashSet; + /// hashSet.find_as("hello", hash(), equal_to<>()); + /// + template + iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate); + + template + const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const; + + template + iterator find_as(const U& u); + + template + const_iterator find_as(const U& u) const; + + // Note: find_by_hash and find_range_by_hash both perform a search based on a hash value. + // It is important to note that multiple hash values may map to the same hash bucket, so + // it would be incorrect to assume all items returned match the hash value that + // was searched for. + + /// Implements a find whereby the user supplies the node's hash code. + /// It returns an iterator to the first element that matches the given hash. However, there may be multiple elements that match the given hash. + + template + ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, iterator) find_by_hash(HashCodeT c) + { + EASTL_CT_ASSERT_MSG(bCacheHashCode, + "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, " + "so it requires cached hash codes. Consider setting template parameter " + "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead."); + + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], c); + + return pNode ? iterator(pNode, mpBucketArray + n) : + iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + template + ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, const_iterator) find_by_hash(HashCodeT c) const + { + EASTL_CT_ASSERT_MSG(bCacheHashCode, + "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, " + "so it requires cached hash codes. Consider setting template parameter " + "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead."); + + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], c); + + return pNode ? + const_iterator(pNode, mpBucketArray + n) : + const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + iterator find_by_hash(const key_type& k, hash_code_t c) + { + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + const_iterator find_by_hash(const key_type& k, hash_code_t c) const + { + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + // Returns a pair that allows iterating over all nodes in a hash bucket + // first in the pair returned holds the iterator for the beginning of the bucket, + // second in the pair returned holds the iterator for the end of the bucket, + // If no bucket is found, both values in the pair are set to end(). + // + // See also the note above. 
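+ //
+ // A minimal usage sketch (hypothetical table 'ht'; nodes in the returned range
+ // may have hash codes that merely collide into the same bucket):
+ //     auto range = ht.find_range_by_hash(someHashCode); // 'someHashCode' assumed known
+ //     for(auto it = range.first; it != range.second; ++it)
+ //         { /* inspect *it; verify the key if an exact match is required */ }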
+ eastl::pair find_range_by_hash(hash_code_t c); + eastl::pair find_range_by_hash(hash_code_t c) const; + + size_type count(const key_type& k) const EA_NOEXCEPT; + + // transparent key support: + // template + // size_type count(const K& k) const; + + eastl::pair equal_range(const key_type& k); + eastl::pair equal_range(const key_type& k) const; + + // transparent key support: + // template + // eastl::pair equal_range(const K& k); + // template + // eastl::pair equal_range(const K& k) const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + // We must remove one of the 'DoGetResultIterator' overloads from the overload-set (via SFINAE) because both can + // not compile successfully at the same time. The 'bUniqueKeys' template parameter chooses at compile-time the + // type of 'insert_return_type' between a pair and a raw iterator. We must pick between the two + // overloads that unpacks the iterator from the pair or simply passes the provided iterator to the caller based + // on the class template parameter. + template + iterator DoGetResultIterator(BoolConstantT, + const insert_return_type& irt, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT + { + return irt.first; + } + + template + iterator DoGetResultIterator(BoolConstantT, + const insert_return_type& irt, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT + { + return irt; + } + + node_type* DoAllocateNodeFromKey(const key_type& key); + node_type* DoAllocateNodeFromKey(key_type&& key); + void DoFreeNode(node_type* pNode); + void DoFreeNodes(node_type** pBucketArray, size_type); + + node_type** DoAllocateBuckets(size_type n); + void DoFreeBuckets(node_type** pBucketArray, size_type n); + + template , ENABLE_IF_TRUETYPE(Enabled) = nullptr> // only enabled when keys are unique + eastl::pair DoInsertUniqueNode(const key_type& k, hash_code_t c, size_type n, node_type* pNodeNew); + + template + eastl::pair DoInsertValue(BoolConstantT, Args&&... args); + + template + iterator DoInsertValue(BoolConstantT, Args&&... args); + + + template + eastl::pair DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + value_type&& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + eastl::pair DoInsertValue(BoolConstantT, + value_type&& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + value_type&& value, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + + template + eastl::pair DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + const value_type& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + eastl::pair DoInsertValue(BoolConstantT, + const value_type& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + const value_type& value, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + node_type* DoAllocateNode(Args&&... 
args); + node_type* DoAllocateNode(value_type&& value); + node_type* DoAllocateNode(const value_type& value); + + // DoInsertKey is supposed to get hash_code_t c = get_hash_code(key). + // it is done in case application has it's own hashset/hashmap-like containter, where hash code is for some reason known prior the insert + // this allows to save some performance, especially with heavy hash functions + eastl::pair DoInsertKey(true_type, const key_type& key, hash_code_t c); + iterator DoInsertKey(false_type, const key_type& key, hash_code_t c); + + // We keep DoInsertKey overload without third parameter, for compatibility with older revisions of EASTL (3.12.07 and earlier) + // It used to call get_hash_code as a first call inside the DoInsertKey. + eastl::pair DoInsertKey(true_type, const key_type& key) { return DoInsertKey(true_type(), key, get_hash_code(key)); } + iterator DoInsertKey(false_type, const key_type& key) { return DoInsertKey(false_type(), key, get_hash_code(key)); } + + void DoRehash(size_type nBucketCount); + node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const; + NodeFindKeyData DoFindKeyData(const key_type& k) const; + + template + ENABLE_IF_HAS_HASHCODE(T, node_type) DoFindNode(T* pNode, hash_code_t c) const + { + for (; pNode; pNode = pNode->mpNext) + { + if (pNode->mnHashCode == c) + return pNode; + } + return NULL; + } + + template + node_type* DoFindNodeT(node_type* pNode, const U& u, BinaryPredicate predicate) const; + + private: + template , ENABLE_IF_TRUETYPE(Enabled) = nullptr> + eastl::pair DoInsertValueExtraForwarding(const key_type& k, + hash_code_t c, + node_type* pNodeNew, + V&& value); + + + }; // class hashtable + + + + + + /////////////////////////////////////////////////////////////////////// + // node_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const node_iterator_base& a, const node_iterator_base& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const node_iterator_base& a, const node_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const hashtable_iterator_base& a, const hashtable_iterator_base& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const hashtable_iterator_base& a, const hashtable_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable + /////////////////////////////////////////////////////////////////////// + + template + hashtable + ::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h, + const Eq& eq, const EK& ek, const allocator_type& allocator) + : rehash_base(), + hash_code_base(ek, eq, h1, h2, h), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(), + mAllocator(allocator) + { + if(nBucketCount < 2) // If we are starting in an initially empty state, with no memory allocation done. + reset_lose_memory(); + else // Else we are creating a potentially non-empty hashtable... + { + EASTL_ASSERT(nBucketCount < 10000000); + mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((uint32_t)nBucketCount); + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2. 
+ } + } + + + + template + template + hashtable::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount, + const H1& h1, const H2& h2, const H& h, + const Eq& eq, const EK& ek, const allocator_type& allocator) + : rehash_base(), + hash_code_base(ek, eq, h1, h2, h), + //mnBucketCount(0), // This gets re-assigned below. + mnElementCount(0), + mRehashPolicy(), + mAllocator(allocator) + { + if(nBucketCount < 2) + { + const size_type nElementCount = (size_type)eastl::ht_distance(first, last); + mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount); + } + else + { + EASTL_ASSERT(nBucketCount < 10000000); + mnBucketCount = nBucketCount; + } + + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first) + insert(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + throw; + } + #endif + } + + + + template + hashtable::hashtable(const this_type& x) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(x.mnBucketCount), + mnElementCount(x.mnElementCount), + mRehashPolicy(x.mRehashPolicy), + mAllocator(x.mAllocator) + { + if(mnElementCount) // If there is anything to copy... + { + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(size_type i = 0; i < x.mnBucketCount; ++i) + { + node_type* pNodeSource = x.mpBucketArray[i]; + node_type** ppNodeDest = mpBucketArray + i; + + while(pNodeSource) + { + *ppNodeDest = DoAllocateNode(pNodeSource->mValue); + copy_code(*ppNodeDest, pNodeSource); + ppNodeDest = &(*ppNodeDest)->mpNext; + pNodeSource = pNodeSource->mpNext; + } + } + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + throw; + } + #endif + } + else + { + // In this case, instead of allocate memory and copy nothing from x, + // we reset ourselves to a zero allocation state. + reset_lose_memory(); + } + } + + + template + hashtable::hashtable(this_type&& x) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(x.mRehashPolicy), + mAllocator(x.mAllocator) + { + reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here. + swap(x); + } + + + template + hashtable::hashtable(this_type&& x, const allocator_type& allocator) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(x.mRehashPolicy), + mAllocator(allocator) + { + reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here. + swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator. 
+ } + + + template + inline const typename hashtable::allocator_type& + hashtable::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + + template + inline typename hashtable::allocator_type& + hashtable::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + + template + inline void hashtable::set_allocator(const allocator_type& allocator) + { + mAllocator = allocator; + } + + + + template + inline typename hashtable::this_type& + hashtable::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + mAllocator = x.mAllocator; + #endif + + insert(x.begin(), x.end()); + } + return *this; + } + + + template + inline typename hashtable::this_type& + hashtable::operator=(this_type&& x) + { + if(this != &x) + { + clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + + template + inline typename hashtable::this_type& + hashtable::operator=(std::initializer_list ilist) + { + // The simplest means of doing this is to clear and insert. There probably isn't a generic + // solution that's any more efficient without having prior knowledge of the ilist contents. + clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + + template + inline hashtable::~hashtable() + { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNodeFromKey(const key_type& key) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNodeFromKey(key_type&& key) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, eastl::move(key)); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + inline void hashtable::DoFreeNode(node_type* pNode) + { + pNode->~node_type(); + EASTLFree(mAllocator, pNode, sizeof(node_type)); + } + + + + template + void hashtable::DoFreeNodes(node_type** pNodeArray, size_type n) + { + for(size_type i = 0; i < n; ++i) + { + node_type* pNode = pNodeArray[i]; + while(pNode) + { + node_type* const pTempNode = pNode; + pNode = pNode->mpNext; + DoFreeNode(pTempNode); + } + pNodeArray[i] = NULL; + } + } + + + + template + typename hashtable::node_type** + hashtable::DoAllocateBuckets(size_type n) + { + // We allocate one extra bucket to hold a sentinel, an arbitrary + // non-null pointer. Iterator increment relies on this. 
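+ // Resulting layout for an allocation of n buckets (see the code below):
+ //     pBucketArray[0 .. n-1] = NULL            (empty bucket chains)
+ //     pBucketArray[n]        = (node_type*)~0  (non-NULL sentinel that terminates bucket iteration)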
+ EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray. + EASTL_CT_ASSERT(kHashtableAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum. + node_type** const pBucketArray = (node_type**)EASTLAllocAlignedFlags(mAllocator, (n + 1) * sizeof(node_type*), EASTL_ALIGN_OF(node_type*), 0, kHashtableAllocFlagBuckets); + //eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL); + memset(pBucketArray, 0, n * sizeof(node_type*)); + pBucketArray[n] = reinterpret_cast((uintptr_t)~0); + return pBucketArray; + } + + + + template + inline void hashtable::DoFreeBuckets(node_type** pBucketArray, size_type n) + { + // If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test + // for pBucketArray == &gpEmptyBucketArray because one library have a different gpEmptyBucketArray + // than another but pass a hashtable to another. So we go by the size. + if(n > 1) + EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a NULL sentinel at the end. + } + + + template + void hashtable::swap(this_type& x) + { + hash_code_base::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap. + eastl::swap(mRehashPolicy, x.mRehashPolicy); + EASTL_MACRO_SWAP(node_type**, mpBucketArray, x.mpBucketArray); + eastl::swap(mnBucketCount, x.mnBucketCount); + eastl::swap(mnElementCount, x.mnElementCount); + + if (mAllocator != x.mAllocator) // If allocators are not equivalent... + { + eastl::swap(mAllocator, x.mAllocator); + } + } + + + template + inline void hashtable::rehash_policy(const rehash_policy_type& rehashPolicy) + { + mRehashPolicy = rehashPolicy; + + const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount); + + if(nBuckets > mnBucketCount) + DoRehash(nBuckets); + } + + + + template + inline typename hashtable::iterator + hashtable::find(const key_type& k) + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + inline typename hashtable::const_iterator + hashtable::find(const key_type& k) const + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + template + inline typename hashtable::iterator + hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) + { + const hash_code_t c = (hash_code_t)uhash(other); + const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy. + + node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate); + return pNode ? 
iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + template + inline typename hashtable::const_iterator + hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const + { + const hash_code_t c = (hash_code_t)uhash(other); + const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy. + + node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + /// hashtable_find + /// + /// Helper function that defaults to using hash and equal_to<>. + /// This makes it so that by default you don't need to provide these. + /// Note that the default hash functions may not be what you want, though. + /// + /// Example usage. Instead of this: + /// hash_set hashSet; + /// hashSet.find("hello", hash(), equal_to<>()); + /// + /// You can use this: + /// hash_set hashSet; + /// hashtable_find(hashSet, "hello"); + /// + template + inline typename H::iterator hashtable_find(H& hashTable, U u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to<>()); } + + template + inline typename H::const_iterator hashtable_find(const H& hashTable, U u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to<>()); } + + + + template + template + inline typename hashtable::iterator + hashtable::find_as(const U& other) + { return eastl::hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to<>()); } + + + template + template + inline typename hashtable::const_iterator + hashtable::find_as(const U& other) const + { return eastl::hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. 
+ //{ return find_as(other, eastl::hash(), eastl::equal_to<>()); } + + + + template + eastl::pair::const_iterator, + typename hashtable::const_iterator> + hashtable::find_range_by_hash(hash_code_t c) const + { + const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + node_type* const pNodeStart = mpBucketArray[start]; + + if (pNodeStart) + { + eastl::pair pair(const_iterator(pNodeStart, mpBucketArray + start), + const_iterator(pNodeStart, mpBucketArray + start)); + pair.second.increment_bucket(); + return pair; + } + + return eastl::pair(const_iterator(mpBucketArray + mnBucketCount), + const_iterator(mpBucketArray + mnBucketCount)); + } + + + + template + eastl::pair::iterator, + typename hashtable::iterator> + hashtable::find_range_by_hash(hash_code_t c) + { + const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + node_type* const pNodeStart = mpBucketArray[start]; + + if (pNodeStart) + { + eastl::pair pair(iterator(pNodeStart, mpBucketArray + start), + iterator(pNodeStart, mpBucketArray + start)); + pair.second.increment_bucket(); + return pair; + + } + + return eastl::pair(iterator(mpBucketArray + mnBucketCount), + iterator(mpBucketArray + mnBucketCount)); + } + + + + template + typename hashtable::size_type + hashtable::count(const key_type& k) const EA_NOEXCEPT + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + size_type result = 0; + + // To do: Make a specialization for bU (unique keys) == true and take + // advantage of the fact that the count will always be zero or one in that case. + for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext) + { + if(compare(k, c, pNode)) + ++result; + } + return result; + } + + + + template + eastl::pair::iterator, + typename hashtable::iterator> + hashtable::equal_range(const key_type& k) + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type** head = mpBucketArray + n; + node_type* pNode = DoFindNode(*head, k, c); + + if(pNode) + { + node_type* p1 = pNode->mpNext; + + for(; p1; p1 = p1->mpNext) + { + if(!compare(k, c, p1)) + break; + } + + iterator first(pNode, head); + iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end() + iterator(mpBucketArray + mnBucketCount)); + } + + + + + template + eastl::pair::const_iterator, + typename hashtable::const_iterator> + hashtable::equal_range(const key_type& k) const + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type** head = mpBucketArray + n; + node_type* pNode = DoFindNode(*head, k, c); + + if(pNode) + { + node_type* p1 = pNode->mpNext; + + for(; p1; p1 = p1->mpNext) + { + if(!compare(k, c, p1)) + break; + } + + const_iterator first(pNode, head); + const_iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(const_iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end() + const_iterator(mpBucketArray + mnBucketCount)); + } + + + template + inline typename hashtable::NodeFindKeyData + hashtable::DoFindKeyData(const key_type& k) const { + NodeFindKeyData d; + d.code = get_hash_code(k); + d.bucket_index = (size_type)bucket_index(k, d.code, 
(uint32_t)mnBucketCount); + d.node = DoFindNode(mpBucketArray[d.bucket_index], k, d.code); + return d; + } + + template + inline typename hashtable::node_type* + hashtable::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const + { + for(; pNode; pNode = pNode->mpNext) + { + if(compare(k, c, pNode)) + return pNode; + } + return NULL; + } + + + + template + template + inline typename hashtable::node_type* + hashtable::DoFindNodeT(node_type* pNode, const U& other, BinaryPredicate predicate) const + { + for(; pNode; pNode = pNode->mpNext) + { + if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg. + return pNode; + } + return NULL; + } + + + template + template // only enabled when keys are unique + eastl::pair::iterator, bool> + hashtable::DoInsertUniqueNode(const key_type& k, hash_code_t c, size_type n, node_type* pNodeNew) + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EA_CONSTEXPR_IF(bDeleteOnException) { DoFreeNode(pNodeNew); } + throw; + } + #endif + } + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, Args&&... args) // true_type means bUniqueKeys is true. + { + // Adds the value to the hash table if not already present. + // If already present then the existing value is returned via an iterator/bool pair. + + // We have a chicken-and-egg problem here. In order to know if and where to insert the value, we need to get the + // hashtable key for the value. But we don't explicitly have a value argument, we have a templated Args&&... argument. + // We need the value_type in order to proceed, but that entails getting an instance of a value_type from the args. + // And it may turn out that the value is already present in the hashtable and we need to cancel the insertion, + // despite having obtained a value_type to put into the hashtable. We have mitigated this problem somewhat by providing + // specializations of the insert function for const value_type& and value_type&&, and so the only time this function + // should get called is when args refers to arguments to construct a value_type. + + node_type* const pNodeNew = DoAllocateNode(eastl::forward(args)...); + const key_type& k = mExtractKey(pNodeNew->mValue); + const hash_code_t c = get_hash_code(k); + size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + + if(pNode == NULL) // If value is not present... add it. + { + return DoInsertUniqueNode(k, c, n, pNodeNew); + } + else + { + // To do: We have an inefficiency to deal with here. We allocated a node above but we are freeing it here because + // it turned out it wasn't needed. But we needed to create the node in order to get the hashtable key for + // the node. 
One possible resolution is to create specializations: DoInsertValue(true_type, value_type&&) and + // DoInsertValue(true_type, const value_type&) which don't need to create a node up front in order to get the + // hashtable key. Probably most users would end up using these pathways instead of this Args... pathway. + // While we should considering handling this to-do item, a lot of the performance limitations of maps and sets + // in practice is with finding elements rather than adding (potentially redundant) new elements. + DoFreeNode(pNodeNew); + } + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, Args&&... args) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); + + node_type* pNodeNew = DoAllocateNode(eastl::forward(args)...); + const key_type& k = mExtractKey(pNodeNew->mValue); + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::node_type* + hashtable::DoAllocateNode(Args&&... args) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward(args)...); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + //////////////////////////////////////////////////////////////////////////////////////////////////// + // Note: The following insertion-related functions are nearly copies of the above three functions, + // but are for value_type&& and const value_type& arguments. It's useful for us to have the functions + // below, even when using a fully compliant C++11 compiler that supports the above functions. + // The reason is because the specializations below are slightly more efficient because they can delay + // the creation of a node until it's known that it will be needed. 
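+ // For example, for a unique-key container, insert(const value_type&) and
+ // insert(value_type&&) can first search for the key and allocate a node only on
+ // a miss, while emplace(Args&&...) must build the node up front just to obtain
+ // its key, freeing it again if the key turns out to already be present.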
+ //////////////////////////////////////////////////////////////////////////////////////////////////// + template + template + inline eastl::pair::iterator, bool> + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, + hash_code_t c, node_type* pNodeNew, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + return DoInsertValueExtraForwarding(k, c, pNodeNew, eastl::move(value)); + } + + template + template + inline eastl::pair::iterator, bool> + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, + hash_code_t c, node_type* pNodeNew, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + return DoInsertValueExtraForwarding(k, c, pNodeNew, value); + } + + template + template // true_type means bUniqueKeys is true. + eastl::pair::iterator, bool> + hashtable::DoInsertValueExtraForwarding(const key_type& k, + hash_code_t c, node_type* pNodeNew, VFwd&& value) + { + // Adds the value to the hash table if not already present. + // If already present then the existing value is returned via an iterator/bool pair. + size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + + if(pNode == NULL) // If value is not present... add it. + { + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + if(pNodeNew) + { + ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::forward(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. + return DoInsertUniqueNode(k, c, n, pNodeNew); + } + else + { + pNodeNew = DoAllocateNode(eastl::move(value)); + return DoInsertUniqueNode(k, c, n, pNodeNew); + } + } + // Else the value is already present, so don't add a new node. And don't free pNodeNew. + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(true_type(), k, c, NULL, eastl::move(value)); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, value_type&& value, + DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch. + + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + if(pNodeNew) + ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. + else + pNodeNew = DoAllocateNode(eastl::move(value)); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. 
The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(false_type(), k, c, NULL, eastl::move(value)); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNode(value_type&& value) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value)); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(true_type(), k, c, NULL, value); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value, + DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch. + + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + if(pNodeNew) + ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. + else + pNodeNew = DoAllocateNode(value); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? 
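+ // Illustration for a multi-key container: inserting A, B, A into the same bucket
+ // yields the chain B -> A -> A (the second A is linked in directly after the
+ // first by the code below), so equal_range(A) can cover both A nodes contiguously.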
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(false_type(), k, c, NULL, value); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNode(const value_type& value) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(value); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + typename hashtable::node_type* + hashtable::allocate_uninitialized_node() + { + // We don't wrap this in try/catch because users of this function are expected to do that themselves as needed. + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + // Leave pNode->mValue uninitialized. + pNode->mpNext = NULL; + return pNode; + } + + + template + void hashtable::free_uninitialized_node(node_type* pNode) + { + // pNode->mValue is expected to be uninitialized. + EASTLFree(mAllocator, pNode, sizeof(node_type)); + } + + + template + eastl::pair::iterator, bool> + hashtable::DoInsertKey(true_type, const key_type& key, const hash_code_t c) // true_type means bUniqueKeys is true. + { + size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], key, c); + + if(pNode == NULL) + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + node_type* const pNodeNew = DoAllocateNodeFromKey(key); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNodeNew); + throw; + } + #endif + } + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + + template + typename hashtable::iterator + hashtable::DoInsertKey(false_type, const key_type& key, const hash_code_t c) // false_type means bUniqueKeys is false. 
+ { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); + + const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + + node_type* const pNodeNew = DoAllocateNodeFromKey(key); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::insert_return_type + hashtable::emplace(Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); // Need to use forward instead of move because Args&& is a "universal reference" instead of an rvalue reference. + } + + template + template + typename hashtable::iterator + hashtable::emplace_hint(const_iterator, Args&&... args) + { + // We currently ignore the iterator argument as a hint. + insert_return_type result = DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); + return DoGetResultIterator(has_unique_keys_type(), result); + } + + template + typename hashtable::insert_return_type + hashtable::insert(value_type&& otherValue) + { + return DoInsertValue(has_unique_keys_type(), eastl::move(otherValue)); + } + + + template + template + typename hashtable::insert_return_type + hashtable::insert(hash_code_t c, node_type* pNodeNew, P&& otherValue) + { + // pNodeNew->mValue is expected to be uninitialized. + value_type value(eastl::forward
<P>
(otherValue)); // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference. + const key_type& k = mExtractKey(value); + return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, eastl::move(value)); + } + + + template + typename hashtable::iterator + hashtable::insert(const_iterator, value_type&& value) + { + // We currently ignore the iterator argument as a hint. + insert_return_type result = DoInsertValue(has_unique_keys_type(), value_type(eastl::move(value))); + return DoGetResultIterator(has_unique_keys_type(), result); + } + + + template + typename hashtable::insert_return_type + hashtable::insert(const value_type& value) + { + return DoInsertValue(has_unique_keys_type(), value); + } + + + template + typename hashtable::insert_return_type + hashtable::insert(hash_code_t c, node_type* pNodeNew, const value_type& value) + { + // pNodeNew->mValue is expected to be uninitialized. + const key_type& k = mExtractKey(value); + return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, value); + } + + + template + template + typename hashtable::insert_return_type + hashtable::insert(P&& otherValue) + { + return emplace(eastl::forward
<P>
(otherValue)); + } + + + template + typename hashtable::iterator + hashtable::insert(const_iterator, const value_type& value) + { + // We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers. + insert_return_type result = DoInsertValue(has_unique_keys_type(), value); + return DoGetResultIterator(has_unique_keys_type(), result); + } + + + template + void hashtable::insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + + template + template + void + hashtable::insert(InputIterator first, InputIterator last) + { + const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last); + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd); + + if(bRehash.first) + DoRehash(bRehash.second); + + for(; first != last; ++first) + DoInsertValue(has_unique_keys_type(), *first); + } + + + template + template + eastl::pair::iterator, bool> + hashtable::insert_or_assign(const key_type& k, M&& obj) + { + auto iter = find(k); + if(iter == end()) + { + return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + eastl::pair::iterator, bool> + hashtable::insert_or_assign(key_type&& k, M&& obj) + { + auto iter = find(k); + if(iter == end()) + { + return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + typename hashtable::iterator + hashtable::insert_or_assign(const_iterator, const key_type& k, M&& obj) + { + return insert_or_assign(k, eastl::forward(obj)).first; // we ignore the iterator hint + } + + template + template + typename hashtable::iterator + hashtable::insert_or_assign(const_iterator, key_type&& k, M&& obj) + { + return insert_or_assign(eastl::move(k), eastl::forward(obj)).first; // we ignore the iterator hint + } + + + template + typename hashtable::iterator + hashtable::erase(const_iterator i) + { + iterator iNext(i.mpNode, i.mpBucket); // Convert from const_iterator to iterator while constructing. + ++iNext; + + node_type* pNode = i.mpNode; + node_type* pNodeCurrent = *i.mpBucket; + + if(pNodeCurrent == pNode) + *i.mpBucket = pNodeCurrent->mpNext; + else + { + // We have a singly-linked list, so we have no choice but to + // walk down it till we find the node before the node at 'i'. + node_type* pNodeNext = pNodeCurrent->mpNext; + + while(pNodeNext != pNode) + { + pNodeCurrent = pNodeNext; + pNodeNext = pNodeCurrent->mpNext; + } + + pNodeCurrent->mpNext = pNodeNext->mpNext; + } + + DoFreeNode(pNode); + --mnElementCount; + + return iNext; + } + + + + template + inline typename hashtable::iterator + hashtable::erase(const_iterator first, const_iterator last) + { + while(first != last) + first = erase(first); + return iterator(first.mpNode, first.mpBucket); + } + + + + template + typename hashtable::size_type + hashtable::erase(const key_type& k) + { + // To do: Reimplement this function to do a single loop and not try to be + // smart about element contiguity. The mechanism here is only a benefit if the + // buckets are heavily overloaded; otherwise this mechanism may be slightly slower. 
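+ // Why the deferred free below matters, as a hedged sketch (names assumed):
+ //
+ //     eastl::hash_map<eastl::string, int> table; /* ... populate ... */
+ //     const eastl::string& key = table.begin()->first; // aliases a node
+ //     table.erase(key); // safe: nodes are unlinked first, freed afterwards
+ //
+ // If nodes were freed as they were unlinked, 'key' could dangle while it
+ // is still being compared against the remaining nodes in the bucket.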
+ + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + const size_type nElementCountSaved = mnElementCount; + + node_type** pBucketArray = mpBucketArray + n; + + while(*pBucketArray && !compare(k, c, *pBucketArray)) + pBucketArray = &(*pBucketArray)->mpNext; + + node_type* pDeleteList = nullptr; + while(*pBucketArray && compare(k, c, *pBucketArray)) + { + node_type* const pNode = *pBucketArray; + *pBucketArray = pNode->mpNext; + // Don't free the node here, k might be a reference to the key inside this node, + // and we're re-using it when we compare to the following nodes. + // Instead, add it to the list of things to be deleted. + pNode->mpNext = pDeleteList; + pDeleteList = pNode; + --mnElementCount; + } + + while (pDeleteList) { + node_type* const pToDelete = pDeleteList; + pDeleteList = pDeleteList->mpNext; + DoFreeNode(pToDelete); + } + + return nElementCountSaved - mnElementCount; + } + + + + template + inline void hashtable::clear() + { + DoFreeNodes(mpBucketArray, mnBucketCount); + mnElementCount = 0; + } + + + + template + inline void hashtable::clear(bool clearBuckets) + { + DoFreeNodes(mpBucketArray, mnBucketCount); + if(clearBuckets) + { + DoFreeBuckets(mpBucketArray, mnBucketCount); + reset_lose_memory(); + } + mnElementCount = 0; + } + + + + template + inline void hashtable::reset_lose_memory() EA_NOEXCEPT + { + // The reset function is a special extension function which unilaterally + // resets the container to an empty state without freeing the memory of + // the contained objects. This is useful for very quickly tearing down a + // container built into scratch memory. + mnBucketCount = 1; + + #ifdef _MSC_VER + mpBucketArray = (node_type**)&gpEmptyBucketArray[0]; + #else + void* p = &gpEmptyBucketArray[0]; + memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers implement strict aliasing and casting is thus unsafe. + #endif + + mnElementCount = 0; + mRehashPolicy.mnNextResize = 0; + } + + + template + inline void hashtable::reserve(size_type nElementCount) + { + rehash(mRehashPolicy.GetBucketCount(uint32_t(nElementCount))); + } + + + + template + inline void hashtable::rehash(size_type nBucketCount) + { + // Note that we unilaterally use the passed in bucket count; we do not attempt migrate it + // up to the next prime number. We leave it at the user's discretion to do such a thing. + DoRehash(nBucketCount); + } + + + + template + void hashtable::DoRehash(size_type nNewBucketCount) + { + node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + node_type* pNode; + + for(size_type i = 0; i < mnBucketCount; ++i) + { + while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings. + { + const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount); + + mpBucketArray[i] = pNode->mpNext; + pNode->mpNext = pBucketArray[nNewBucketIndex]; + pBucketArray[nNewBucketIndex] = pNode; + } + } + + DoFreeBuckets(mpBucketArray, mnBucketCount); + mnBucketCount = nNewBucketCount; + mpBucketArray = pBucketArray; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + // A failure here means that a hash function threw an exception. + // We can't restore the previous state without calling the hash + // function again, so the only sensible recovery is to delete everything. 
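+ // Concretely: at this point some nodes have already been relinked into
+ // the new bucket array while others still sit in the old one, and moving
+ // them back would require calling the throwing hash function again. So
+ // we free the nodes in both arrays, release the new bucket array, zero
+ // the element count, and rethrow.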
+ DoFreeNodes(pBucketArray, nNewBucketCount); + DoFreeBuckets(pBucketArray, nNewBucketCount); + DoFreeNodes(mpBucketArray, mnBucketCount); + mnElementCount = 0; + throw; + } + #endif + } + + + template + inline bool hashtable::validate() const + { + // Verify our empty bucket array is unmodified. + if(gpEmptyBucketArray[0] != NULL) + return false; + + if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0)) + return false; + + // Verify that we have at least one bucket. Calculations can + // trigger division by zero exceptions otherwise. + if(mnBucketCount == 0) + return false; + + // Verify that gpEmptyBucketArray is used correctly. + // gpEmptyBucketArray is only used when initially empty. + if((void**)mpBucketArray == &gpEmptyBucketArray[0]) + { + if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables. + return false; + + if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly an only for mnBucketCount == 1. + return false; + } + else + { + if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray. + return false; + } + + // Verify that the element count matches mnElementCount. + size_type nElementCount = 0; + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + ++nElementCount; + + if(nElementCount != mnElementCount) + return false; + + // To do: Verify that individual elements are in the expected buckets. + + return true; + } + + + template + int hashtable::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + // operator==, != have been moved to the specific container subclasses (e.g. hash_map). + + // The following comparison operators are deprecated and will likely be removed in a + // future version of this package. + // + // Comparing hash tables for less-ness is an odd thing to do. We provide it for + // completeness, though the user is advised to be wary of how they use this. + // + template + EASTL_REMOVE_AT_2024_APRIL inline bool operator<(const hashtable& a, + const hashtable& b) + { + // This requires hash table elements to support operator<. Since the hash table + // doesn't compare elements via less (it does so via equals), we must use the + // globally defined operator less for the elements. 
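+ // Because iteration order depends on bucket count and hashing, two tables
+ // holding identical elements can compare differently after a rehash; this
+ // is the main reason these relational operators are deprecated.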
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + EASTL_REMOVE_AT_2024_APRIL inline bool operator>(const hashtable& a, + const hashtable& b) + { + return b < a; + } + + + template + EASTL_REMOVE_AT_2024_APRIL inline bool operator<=(const hashtable& a, + const hashtable& b) + { + return !(b < a); + } + + + template + EASTL_REMOVE_AT_2024_APRIL inline bool operator>=(const hashtable& a, + const hashtable& b) + { + return !(a < b); + } + + + template + inline void swap(const hashtable& a, + const hashtable& b) + { + a.swap(b); + } + + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/in_place_t.h b/external/EASTL/include/EASTL/internal/in_place_t.h new file mode 100644 index 00000000..79acd184 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/in_place_t.h @@ -0,0 +1,82 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_IN_PLACE_T_H +#define EASTL_INTERNAL_IN_PLACE_T_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +namespace eastl +{ + namespace Internal + { + struct in_place_tag {}; + template struct in_place_type_tag {}; + template struct in_place_index_tag {}; + } + + /////////////////////////////////////////////////////////////////////////////// + /// in_place_tag + /// + /// http://en.cppreference.com/w/cpp/utility/in_place_tag + /// + struct in_place_tag + { + in_place_tag() = delete; + + private: + explicit in_place_tag(Internal::in_place_tag) {} + friend inline in_place_tag Internal_ConstructInPlaceTag(); + }; + + // internal factory function for in_place_tag + inline in_place_tag Internal_ConstructInPlaceTag() { return in_place_tag(Internal::in_place_tag{}); } + + + /////////////////////////////////////////////////////////////////////////////// + /// in_place_t / in_place_type_t / in_place_index_t + /// + /// used to disambiguate overloads that take arguments (possibly a parameter + /// pack) for in-place construction of some value. + /// + /// http://en.cppreference.com/w/cpp/utility/optional/in_place_t + /// + using in_place_t = in_place_tag(&)(Internal::in_place_tag); + + template + using in_place_type_t = in_place_tag(&)(Internal::in_place_type_tag); + + template + using in_place_index_t = in_place_tag(&)(Internal::in_place_index_tag); + + + /////////////////////////////////////////////////////////////////////////////// + /// in_place / in_place / in_place + /// + /// http://en.cppreference.com/w/cpp/utility/in_place + /// + inline in_place_tag in_place(Internal::in_place_tag) { return Internal_ConstructInPlaceTag(); } + + template + inline in_place_tag in_place(Internal::in_place_type_tag) { return Internal_ConstructInPlaceTag(); } + + template + inline in_place_tag in_place(Internal::in_place_index_tag) { return Internal_ConstructInPlaceTag(); } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/external/EASTL/include/EASTL/internal/integer_sequence.h b/external/EASTL/include/EASTL/internal/integer_sequence.h new file mode 100644 index 00000000..ba5dd4ef --- /dev/null +++ b/external/EASTL/include/EASTL/internal/integer_sequence.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTEGER_SEQUENCE_H +#define EASTL_INTEGER_SEQUENCE_H + +#include +#include +#include + +namespace eastl +{ + +#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + +// integer_sequence +template +class integer_sequence +{ +public: + typedef T value_type; + static_assert(is_integral::value, "eastl::integer_sequence can only be instantiated with an integral type"); + static EA_CONSTEXPR size_t size() EA_NOEXCEPT { return sizeof...(Ints); } +}; + +template +using index_sequence = integer_sequence; + +#if (defined(EA_COMPILER_GNUC) && EA_COMPILER_VERSION >= 8001) + +template +using make_integer_sequence = integer_sequence; + +#elif (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_BUILTIN(__make_integer_seq)) || (defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1910)) + +template +using make_integer_sequence = __make_integer_seq; + +#else + +template +struct make_index_sequence_impl; + +template +struct make_index_sequence_impl> +{ + typedef typename make_index_sequence_impl>::type type; +}; + +template +struct make_index_sequence_impl<0, integer_sequence> +{ + typedef integer_sequence type; +}; + +template +struct integer_sequence_convert_impl; + +template +struct integer_sequence_convert_impl> +{ + typedef integer_sequence type; +}; + +template +struct make_integer_sequence_impl +{ + typedef typename integer_sequence_convert_impl>::type>::type type; +}; + +template +using make_integer_sequence = typename make_integer_sequence_impl::type; + +#endif + +template +using make_index_sequence = make_integer_sequence; + +// Helper alias template that converts any type parameter pack into an index sequence of the same length +template +using index_sequence_for = make_index_sequence; + +namespace internal +{ + +template +struct integer_sequence_size_helper; + +template +struct integer_sequence_size_helper> : public integral_constant +{ +}; + +template +struct integer_sequence_size : public integer_sequence_size_helper> +{ +}; + +template +struct index_sequence_size : public integer_sequence_size_helper> +{ +}; + +template +EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t integer_sequence_size_v = integer_sequence_size::value; + +template +EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR size_t index_sequence_size_v = index_sequence_size::value; + + +} // namespace internal + +#endif // EASTL_VARIADIC_TEMPLATES_ENABLED + +} // namespace eastl + +#endif // EASTL_INTEGER_SEQUENCE_H diff --git a/external/EASTL/include/EASTL/internal/intrusive_hashtable.h b/external/EASTL/include/EASTL/internal/intrusive_hashtable.h new file mode 100644 index 00000000..941f0774 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/intrusive_hashtable.h @@ -0,0 +1,990 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements an intrusive hash table, which is a hash table whereby +// the container nodes are the hash table objects themselves. This has benefits +// primarily in terms of memory management. There are some minor limitations +// that result from this. 
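+// A hedged usage sketch (eastl::intrusive_hash_map is the map container
+// built on this table; the names below are illustrative only):
+//
+//     struct Widget : public eastl::intrusive_hash_node_key<uint32_t>
+//         { /* user data */ };
+//
+//     eastl::intrusive_hash_map<uint32_t, Widget, 37> widgetMap;
+//     Widget w;
+//     w.mKey = 12;
+//     widgetMap.insert(w); // no allocation: 'w' itself becomes the node
+//     widgetMap.remove(w); // removal by value; 'w' must outlive membership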
+// +/////////////////////////////////////////////////////////////////////////////// + + + +#ifndef EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H +#define EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS(); +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS(); + + +namespace eastl +{ + + /// intrusive_hash_node + /// + /// A hash_node stores an element in a hash table, much like a + /// linked list node stores an element in a linked list. + /// An intrusive_hash_node additionally can, via template parameter, + /// store a hash code in the node to speed up hash calculations + /// and comparisons in some cases. + /// + /// To consider: Make a version of intrusive_hash_node which is + /// templated on the container type. This would allow for the + /// mpNext pointer to be the container itself and thus allow + /// for easier debugging. + /// + /// Example usage: + /// struct Widget : public intrusive_hash_node{ ... }; + /// + /// struct Dagget : public intrusive_hash_node_key{ ... }; + /// + struct intrusive_hash_node + { + intrusive_hash_node* mpNext; + }; + + + template + struct intrusive_hash_node_key : public intrusive_hash_node + { + typedef Key key_type; + Key mKey; + }; + + + + /// intrusive_node_iterator + /// + /// Node iterators iterate nodes within a given bucket. + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. + /// + template + struct intrusive_node_iterator + { + public: + typedef intrusive_node_iterator this_type; + typedef Value value_type; + typedef Value node_type; + typedef ptrdiff_t difference_type; + typedef typename conditional::type pointer; + typedef typename conditional::type reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + intrusive_node_iterator() + : mpNode(NULL) { } + + explicit intrusive_node_iterator(value_type* pNode) + : mpNode(pNode) { } + + intrusive_node_iterator(const intrusive_node_iterator& x) + : mpNode(x.mpNode) { } + + reference operator*() const + { return *mpNode; } + + pointer operator->() const + { return mpNode; } + + this_type& operator++() + { mpNode = static_cast(mpNode->mpNext); return *this; } + + this_type operator++(int) + { this_type temp(*this); mpNode = static_cast(mpNode->mpNext); return temp; } + + }; // intrusive_node_iterator + + + + + /// intrusive_hashtable_iterator_base + /// + /// An intrusive_hashtable_iterator_base iterates the entire hash table and + /// not just nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. + /// + template + struct intrusive_hashtable_iterator_base + { + public: + typedef Value value_type; + + protected: + template + friend class intrusive_hashtable; + + template + friend struct intrusive_hashtable_iterator; + + template + friend bool operator==(const intrusive_hashtable_iterator_base&, const intrusive_hashtable_iterator_base&); + + template + friend bool operator!=(const intrusive_hashtable_iterator_base&, const intrusive_hashtable_iterator_base&); + + value_type* mpNode; // Current node within current bucket. + value_type** mpBucket; // Current bucket. 
+ + public: + intrusive_hashtable_iterator_base(value_type* pNode, value_type** pBucket) + : mpNode(pNode), mpBucket(pBucket) { } + + void increment_bucket() + { + ++mpBucket; + while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end + ++mpBucket; // of the bucket array so that finding the end of the bucket + mpNode = *mpBucket; // array is quick and simple. + } + + void increment() + { + mpNode = static_cast(mpNode->mpNext); + + while(mpNode == NULL) + mpNode = *++mpBucket; + } + + }; // intrusive_hashtable_iterator_base + + + + + /// intrusive_hashtable_iterator + /// + /// An intrusive_hashtable_iterator iterates the entire hash table and not + /// just nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. + /// + template + struct intrusive_hashtable_iterator : public intrusive_hashtable_iterator_base + { + public: + typedef intrusive_hashtable_iterator_base base_type; + typedef intrusive_hashtable_iterator this_type; + typedef intrusive_hashtable_iterator this_type_non_const; + typedef typename base_type::value_type value_type; + typedef typename conditional::type pointer; + typedef typename conditional::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + intrusive_hashtable_iterator() + : base_type(NULL, NULL) { } + + explicit intrusive_hashtable_iterator(value_type* pNode, value_type** pBucket) + : base_type(pNode, pBucket) { } + + explicit intrusive_hashtable_iterator(value_type** pBucket) + : base_type(*pBucket, pBucket) { } + + template ::type = false> + intrusive_hashtable_iterator(const this_type_non_const& x) + : base_type(x.mpNode, x.mpBucket) { } + + reference operator*() const + { return *base_type::mpNode; } + + pointer operator->() const + { return base_type::mpNode; } + + this_type& operator++() + { base_type::increment(); return *this; } + + this_type operator++(int) + { this_type temp(*this); base_type::increment(); return temp; } + + }; // intrusive_hashtable_iterator + + + + /// use_intrusive_key + /// + /// operator()(x) returns x.mKey. Used in maps, as opposed to sets. + /// This is a template policy implementation; it is an alternative to + /// the use_self template implementation, which is used for sets. + /// + template + struct use_intrusive_key + { + typedef Key result_type; + + const result_type& operator()(const Node& x) const + { return x.mKey; } + }; + + + + /////////////////////////////////////////////////////////////////////////// + /// intrusive_hashtable + /// + template + class intrusive_hashtable + { + public: + typedef intrusive_hashtable this_type; + typedef Key key_type; + typedef Value value_type; + typedef Value mapped_type; + typedef Value node_type; + typedef uint32_t hash_code_t; + typedef Equal key_equal; + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef value_type& reference; + typedef const value_type& const_reference; + typedef intrusive_node_iterator local_iterator; + typedef intrusive_node_iterator const_local_iterator; + typedef intrusive_hashtable_iterator iterator; + typedef intrusive_hashtable_iterator const_iterator; + typedef typename conditional, iterator>::type insert_return_type; + typedef typename conditional, + eastl::use_intrusive_key >::type extract_key; + + enum + { + kBucketCount = bucketCount + }; + + protected: + node_type* mBucketArray[kBucketCount + 1]; // '+1' because we have an end bucket which is non-NULL so iterators always stop on it. + size_type mnElementCount; + Hash mHash; // To do: Use base class optimization to make this go away when it is of zero size. + Equal mEqual; // To do: Use base class optimization to make this go away when it is of zero size. + + public: + intrusive_hashtable(const Hash&, const Equal&); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT + { + iterator i(mBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator begin() const EA_NOEXCEPT + { + const_iterator i(const_cast(mBucketArray)); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator cbegin() const EA_NOEXCEPT + { + return begin(); + } + + iterator end() EA_NOEXCEPT + { return iterator(mBucketArray + kBucketCount); } + + const_iterator end() const EA_NOEXCEPT + { return const_iterator(const_cast(mBucketArray) + kBucketCount); } + + const_iterator cend() const EA_NOEXCEPT + { return const_iterator(const_cast(mBucketArray) + kBucketCount); } + + local_iterator begin(size_type n) EA_NOEXCEPT + { return local_iterator(mBucketArray[n]); } + + const_local_iterator begin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mBucketArray[n]); } + + const_local_iterator cbegin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mBucketArray[n]); } + + local_iterator end(size_type) EA_NOEXCEPT + { return local_iterator(NULL); } + + const_local_iterator end(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + const_local_iterator cend(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + size_type size() const EA_NOEXCEPT + { return mnElementCount; } + + bool empty() const EA_NOEXCEPT + { return mnElementCount == 0; } + + size_type bucket_count() const EA_NOEXCEPT // This function is unnecessary, as the user can directly reference + { return kBucketCount; } // intrusive_hashtable::kBucketCount as a constant. + + size_type bucket_size(size_type n) const EA_NOEXCEPT + { return (size_type)eastl::distance(begin(n), end(n)); } + + size_type bucket(const key_type& k) const EA_NOEXCEPT + { return (size_type)(mHash(k) % kBucketCount); } + + public: + float load_factor() const EA_NOEXCEPT + { return (float)mnElementCount / (float)kBucketCount; } + + public: + insert_return_type insert(value_type& value) + { return DoInsertValue(value, integral_constant()); } + + insert_return_type insert(const_iterator, value_type& value) + { return insert(value); } // To consider: We might be able to use the iterator argument to specify a specific insertion location. + + template + void insert(InputIterator first, InputIterator last); + + public: + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + size_type erase(const key_type& k); + iterator remove(value_type& value); // Removes by value instead of by iterator. 
This is an O(1) operation, due to this hashtable being 'intrusive'. + + void clear(); + + public: + iterator find(const key_type& k); + const_iterator find(const key_type& k) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the hashtable value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. + /// + /// Example usage: + /// hash_set hashSet; + /// hashSet.find_as("hello"); // Use default hash and compare. + /// + /// Example usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// hashSet.find_as("hello", hash(), equal_to<>()); + /// + template + iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate); + + template + const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const; + + template + iterator find_as(const U& u); + + template + const_iterator find_as(const U& u) const; + + size_type count(const key_type& k) const; + + // The use for equal_range in a hash_table seems somewhat questionable. + // The primary reason for its existence is to replicate the interface of set/map. + eastl::pair equal_range(const key_type& k); + eastl::pair equal_range(const key_type& k) const; + + public: + bool validate() const; + int validate_iterator(const_iterator i) const; + + public: + Hash hash_function() const + { return mHash; } + + EASTL_REMOVE_AT_2024_APRIL Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*). + + const key_equal& key_eq() const + { return mEqual; } + + key_equal& key_eq() + { return mEqual; } + + protected: + eastl::pair DoInsertValue(value_type&, true_type); // true_type means bUniqueKeys is true. + iterator DoInsertValue(value_type&, false_type); // false_type means bUniqueKeys is false. 
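+ // Note for both overloads: insertion never allocates; the value object
+ // itself is linked into the bucket, so the caller must keep it alive for
+ // as long as it remains in the table.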
+ + node_type* DoFindNode(node_type* pNode, const key_type& k) const; + + template + node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const; + + }; // class intrusive_hashtable + + + + + + /////////////////////////////////////////////////////////////////////// + // node_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_node_iterator& a, + const intrusive_node_iterator& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const intrusive_node_iterator& a, + const intrusive_node_iterator& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_hashtable_iterator_base& a, + const intrusive_hashtable_iterator_base& b) + { return a.mpNode == b.mpNode; } + + + template + inline bool operator!=(const intrusive_hashtable_iterator_base& a, + const intrusive_hashtable_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_hashtable + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_hashtable::intrusive_hashtable(const H& h, const Eq& eq) + : mnElementCount(0), + mHash(h), + mEqual(eq) + { + memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0])); + mBucketArray[kBucketCount] = reinterpret_cast((uintptr_t)~0); + } + + + template + void intrusive_hashtable::swap(this_type& x) + { + for(size_t i = 0; i < kBucketCount; i++) + eastl::swap(mBucketArray[i], x.mBucketArray[i]); + + eastl::swap(mnElementCount, x.mnElementCount); + eastl::swap(mHash, x.mHash); + eastl::swap(mEqual, x.mEqual); + } + + + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::find(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], k); + return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount); + } + + + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], k); + return pNode ? const_iterator(pNode, const_cast(mBucketArray) + n) : const_iterator(const_cast(mBucketArray) + kBucketCount); + } + + + template + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) + { + const size_type n = (size_type)(uhash(other) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate); + return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount); + } + + + template + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const + { + const size_type n = (size_type)(uhash(other) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate); + return pNode ? 
const_iterator(pNode, const_cast(mBucketArray) + n) : const_iterator(const_cast(mBucketArray) + kBucketCount); + } + + + /// intrusive_hashtable_find + /// + /// Helper function that defaults to using hash and equal_to<>. + /// This makes it so that by default you don't need to provide these. + /// Note that the default hash functions may not be what you want, though. + /// + /// Example usage. Instead of this: + /// hash_set hashSet; + /// hashSet.find("hello", hash(), equal_to<>()); + /// + /// You can use this: + /// hash_set hashSet; + /// hashtable_find(hashSet, "hello"); + /// + template + inline typename H::iterator intrusive_hashtable_find(H& hashTable, const U& u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to<>()); } + + template + inline typename H::const_iterator intrusive_hashtable_find(const H& hashTable, const U& u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to<>()); } + + + + template + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::find_as(const U& other) + { return eastl::intrusive_hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to<>()); } + + + template + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find_as(const U& other) const + { return eastl::intrusive_hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to<>()); } + + + template + typename intrusive_hashtable::size_type + intrusive_hashtable::count(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + size_type result = 0; + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + // To do: Make a specialization for bU (unique keys) == true and take + // advantage of the fact that the count will always be zero or one in that case. + for(node_type* pNode = mBucketArray[n]; pNode; pNode = static_cast(pNode->mpNext)) + { + if(mEqual(k, extractKey(*pNode))) + ++result; + } + return result; + } + + + template + eastl::pair::iterator, + typename intrusive_hashtable::iterator> + intrusive_hashtable::equal_range(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type** head = mBucketArray + n; + node_type* pNode = DoFindNode(*head, k); + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + if(pNode) + { + node_type* p1 = static_cast(pNode->mpNext); + + for(; p1; p1 = static_cast(p1->mpNext)) + { + if(!mEqual(k, extractKey(*p1))) + break; + } + + iterator first(pNode, head); + iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(iterator(mBucketArray + kBucketCount), + iterator(mBucketArray + kBucketCount)); + } + + + + + template + eastl::pair::const_iterator, + typename intrusive_hashtable::const_iterator> + intrusive_hashtable::equal_range(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type** head = const_cast(mBucketArray + n); + node_type* pNode = DoFindNode(*head, k); + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. 
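+ // Equal elements are kept contiguous within a bucket (see DoInsertValue
+ // above), so once the first match is found the range simply extends
+ // forward to the first non-matching node, or into the next non-empty
+ // bucket for the end iterator.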
+ + if(pNode) + { + node_type* p1 = static_cast(pNode->mpNext); + + for(; p1; p1 = static_cast(p1->mpNext)) + { + if(!mEqual(k, extractKey(*p1))) + break; + } + + const_iterator first(pNode, head); + const_iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(const_iterator(const_cast(mBucketArray) + kBucketCount), + const_iterator(const_cast(mBucketArray) + kBucketCount)); + } + + + template + inline typename intrusive_hashtable::node_type* + intrusive_hashtable::DoFindNode(node_type* pNode, const key_type& k) const + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + for(; pNode; pNode = static_cast(pNode->mpNext)) + { + if(mEqual(k, extractKey(*pNode))) + return pNode; + } + return NULL; + } + + + template + template + inline typename intrusive_hashtable::node_type* + intrusive_hashtable::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + for(; pNode; pNode = static_cast(pNode->mpNext)) + { + if(predicate(extractKey(*pNode), other)) // Intentionally compare with key as first arg and other as second arg. + return pNode; + } + return NULL; + } + + + template + eastl::pair::iterator, bool> + intrusive_hashtable::DoInsertValue(value_type& value, true_type) // true_type means bUniqueKeys is true. + { + // For sets (as opposed to maps), one could argue that all insertions are successful, + // as all elements are unique. However, the equal function might not think so. + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], extractKey(value)); + + if(pNode == NULL) + { + value.mpNext = mBucketArray[n]; + mBucketArray[n] = &value; + ++mnElementCount; + + return eastl::pair(iterator(&value, mBucketArray + n), true); + } + + return eastl::pair(iterator(pNode, mBucketArray + n), false); + } + + + template + typename intrusive_hashtable::iterator + intrusive_hashtable::DoInsertValue(value_type& value, false_type) // false_type means bUniqueKeys is false. + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + node_type* const pNodePrev = DoFindNode(mBucketArray[n], extractKey(value)); + + if(pNodePrev == NULL) + { + value.mpNext = mBucketArray[n]; + mBucketArray[n] = &value; + } + else + { + value.mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = &value; + } + + ++mnElementCount; + + return iterator(&value, mBucketArray + n); + } + + + + template + template + inline void intrusive_hashtable::insert(InputIterator first, InputIterator last) + { + for(; first != last; ++first) + insert(*first); + } + + + template + typename intrusive_hashtable::iterator + intrusive_hashtable::erase(const_iterator i) + { + iterator iNext(i.mpNode, i.mpBucket); + ++iNext; + + node_type* pNode = i.mpNode; + node_type* pNodeCurrent = *i.mpBucket; + + if(pNodeCurrent == pNode) + *i.mpBucket = static_cast(pNodeCurrent->mpNext); + else + { + // We have a singly-linked list, so we have no choice but to + // walk down it till we find the node before the node at 'i'. 
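+ // This predecessor walk makes erase(iterator) cost proportional to the
+ // bucket's length rather than O(1); with a sane bucket count and hash
+ // that is typically only a few nodes.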
+ node_type* pNodeNext = static_cast(pNodeCurrent->mpNext); + + while(pNodeNext != pNode) + { + pNodeCurrent = pNodeNext; + pNodeNext = static_cast(pNodeCurrent->mpNext); + } + + pNodeCurrent->mpNext = static_cast(pNodeNext->mpNext); + } + + // To consider: In debug builds set the node mpNext to NULL. + --mnElementCount; + + return iNext; + } + + + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::erase(const_iterator first, const_iterator last) + { + while(first != last) + first = erase(first); + return iterator(first.mpNode, first.mpBucket); + } + + + template + typename intrusive_hashtable::size_type + intrusive_hashtable::erase(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + const size_type nElementCountSaved = mnElementCount; + node_type*& pNodeBase = mBucketArray[n]; + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + // Note by Paul Pedriana: + // We have two loops here, and I'm not finding any easy way to having just one + // loop without changing the requirements of the hashtable node definition. + // It's a problem of taking an address of a variable and converting it to the + // address of another type without knowing what that type is. Perhaps I'm a + // little overly tired, so if there is a simple solution I am probably missing it. + + while(pNodeBase && mEqual(k, extractKey(*pNodeBase))) + { + pNodeBase = static_cast(pNodeBase->mpNext); + --mnElementCount; + } + + node_type* pNodePrev = pNodeBase; + + if(pNodePrev) + { + node_type* pNodeCur; + + while((pNodeCur = static_cast(pNodePrev->mpNext)) != NULL) + { + if(mEqual(k, extractKey(*pNodeCur))) + { + pNodePrev->mpNext = static_cast(pNodeCur->mpNext); + --mnElementCount; // To consider: In debug builds set the node mpNext to NULL. + } + else + pNodePrev = static_cast(pNodePrev->mpNext); + } + } + + return nElementCountSaved - mnElementCount; + } + + + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::remove(value_type& value) + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + + return erase(iterator(&value, &mBucketArray[n])); + } + + + template + inline void intrusive_hashtable::clear() + { + // To consider: In debug builds set the node mpNext to NULL. + memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0])); + mnElementCount = 0; + } + + + template + inline bool intrusive_hashtable::validate() const + { + // Verify that the element count matches mnElementCount. + size_type nElementCount = 0; + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + ++nElementCount; + + if(nElementCount != mnElementCount) + return false; + + // To do: Verify that individual elements are in the expected buckets. + + return true; + } + + + template + int intrusive_hashtable::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. 
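+ // Hedged usage sketch of the returned bitmask (the flag names are the
+ // eastl::iterator_status_flag values used below):
+ //
+ //     if(table.validate_iterator(it) & eastl::isf_can_dereference)
+ //         { /* safe to use *it */ }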
+ + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()); + } + + + template + inline bool operator!=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(a == b); + } + + + // Comparing hash tables for less-ness is an odd thing to do. We provide it for + // completeness, though the user is advised to be wary of how they use this. + template + inline bool operator<(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + // This requires hash table elements to support operator<. Since the hash table + // doesn't compare elements via less (it does so via equals), we must use the + // globally defined operator less for the elements. + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator>(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return b < a; + } + + + template + inline bool operator<=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(a < b); + } + + + template + inline void swap(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + a.swap(b); + } + + +} // namespace eastl + + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/mem_fn.h b/external/EASTL/include/EASTL/internal/mem_fn.h new file mode 100644 index 00000000..942cb918 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/mem_fn.h @@ -0,0 +1,307 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_MEM_FN_H +#define EASTL_INTERNAL_MEM_FN_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once +#endif + +//////////////////////////////////////////////////////////////////////////////// +// The code in this file is a modification of the libcxx implementation. We copy +// the license information here as required. +// +// We implement only enough of mem_fn to implement eastl::function. +//////////////////////////////////////////////////////////////////////////////// + +//===------------------------ functional ----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +EASTL_INTERNAL_DISABLE_DEPRECATED() // 'eastl::unary_function' / 'binary_function': was declared deprecated + +namespace eastl +{ + // + // apply_cv + // + template ::type>::value, + bool = is_volatile::type>::value> + struct apply_cv { typedef U type; }; + + template struct apply_cv { typedef const U type; }; + template struct apply_cv { typedef volatile U type; }; + template struct apply_cv { typedef const volatile U type; }; + template struct apply_cv { typedef U& type; }; + template struct apply_cv { typedef const U& type; }; + template struct apply_cv { typedef volatile U& type; }; + template struct apply_cv { typedef const volatile U& type; }; + + + + // + // has_result_type + // + template + struct has_result_type + { + private: + template + static eastl::no_type test(...); + + template + static eastl::yes_type test(typename U::result_type* = 0); + + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + + + // + // derives_from_unary_function + // derives_from_binary_function + // + template + struct derives_from_unary_function + { + private: + static eastl::no_type test(...); + + template + static unary_function test(const volatile unary_function*); + + public: + static const bool value = !is_same::value; + typedef decltype(test((T*)0)) type; + }; + + template + struct derives_from_binary_function + { + private: + static eastl::no_type test(...); + template + static binary_function test(const volatile binary_function*); + + public: + static const bool value = !is_same::value; + typedef decltype(test((T*)0)) type; + }; + + + + // + // maybe_derives_from_unary_function + // maybe_derives_from_binary_function + // + template ::value> + struct maybe_derive_from_unary_function // bool is true + : public derives_from_unary_function::type { }; + + template + struct maybe_derive_from_unary_function { }; + + template ::value> + struct maybe_derive_from_binary_function // bool is true + : public derives_from_binary_function::type { }; + + template + struct maybe_derive_from_binary_function { }; + + + + // + // weak_result_type_imp + // + template ::value> + struct weak_result_type_imp // bool is true + : public maybe_derive_from_unary_function, + public maybe_derive_from_binary_function + { + EASTL_REMOVE_AT_2024_APRIL typedef typename T::result_type result_type; + }; + + template + struct weak_result_type_imp : public maybe_derive_from_unary_function, + public maybe_derive_from_binary_function { }; + + + + // + // weak_result_type + // + template + struct weak_result_type : public weak_result_type_imp { }; + + // 0 argument case + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + + // 1 argument case + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + + // 2 argument case + template struct weak_result_type : public binary_function { }; + template struct 
weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + + // 3 or more arguments +#if EASTL_VARIADIC_TEMPLATES_ENABLED + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; + template struct weak_result_type { EASTL_REMOVE_AT_2024_APRIL typedef R result_type; }; +#endif + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // mem_fn_impl + // + template + class mem_fn_impl +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + // Due to a (seemingly random) internal compiler error on VS2013 we disable eastl::unary_function and + // binary_function support for eastl::mem_fn as its not widely (if at all) used. If you require this support + // on VS2013 or below please contact us. + : public weak_result_type +#endif + { + public: + typedef T type; + + private: + type func; + + public: + EASTL_FORCE_INLINE mem_fn_impl(type _func) : func(_func) {} + +#if EASTL_VARIADIC_TEMPLATES_ENABLED + template + typename invoke_result::type operator()(ArgTypes&&... 
args) const + { + return invoke(func, eastl::forward(args)...); + } +#else + typename invoke_result::type operator()() const { return invoke_impl(func); } + + template + typename invoke_result0::type operator()(A0& a0) const + { + return invoke(func, a0); + } + + template + typename invoke_result1::type operator()(A0& a0, A1& a1) const + { + return invoke(func, a0, a1); + } + + template + typename invoke_result2::type operator()(A0& a0, A1& a1, A2& a2) const + { + return invoke(func, a0, a1, a2); + } +#endif + }; // mem_fn_impl + + + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // mem_fn -> mem_fn_impl adapters + // + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R T::*pm) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)()) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0)) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1)) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2)) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) const volatile) + { return mem_fn_impl(pm); } + +} // namespace eastl + +EASTL_INTERNAL_RESTORE_DEPRECATED() + +#endif // EASTL_INTERNAL_MEM_FN_H diff --git a/external/EASTL/include/EASTL/internal/memory_base.h b/external/EASTL/include/EASTL/internal/memory_base.h new file mode 100644 index 00000000..b1c3490b --- /dev/null +++ b/external/EASTL/include/EASTL/internal/memory_base.h @@ -0,0 +1,37 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTERNAL_MEMORY_BASE_H +#define EASTL_INTERNAL_MEMORY_BASE_H + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + +//////////////////////////////////////////////////////////////////////////////////////////// +// This file contains basic functionality found in the standard library 'memory' header that +// have limited or no dependencies. This allows us to utilize these utilize these functions +// in other EASTL code while avoid circular dependencies. +//////////////////////////////////////////////////////////////////////////////////////////// + +namespace eastl +{ + /// addressof + /// + /// From the C++11 Standard, section 20.6.12.1 + /// Returns the actual address of the object or function referenced by r, even in the presence of an overloaded operator&. + /// + template + T* addressof(T& value) EA_NOEXCEPT + { + return reinterpret_cast(&const_cast(reinterpret_cast(value))); + } + +} // namespace eastl + +#endif // EASTL_INTERNAL_MEMORY_BASE_H + diff --git a/external/EASTL/include/EASTL/internal/move_help.h b/external/EASTL/include/EASTL/internal/move_help.h new file mode 100644 index 00000000..97990df6 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/move_help.h @@ -0,0 +1,162 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_MOVE_HELP_H +#define EASTL_INTERNAL_MOVE_HELP_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + + +// C++11's rvalue references aren't supported by earlier versions of C++. +// It turns out that in a number of cases under earlier C++ versions we can +// write code that uses rvalues similar to lvalues. We have macros below for +// such cases. For example, eastl::move (same as std::move) can be treated +// as a no-op under C++03, though with the consequence that move functionality +// isn't taken advantage of. + + +/// EASTL_MOVE +/// Acts like eastl::move when possible. Same as C++11 std::move. +/// +/// EASTL_MOVE_INLINE +/// Acts like eastl::move but is implemented inline instead of a function call. +/// This allows code to be faster in debug builds in particular. +/// Depends on C++ compiler decltype support or a similar extension. +/// +/// EASTL_FORWARD +/// Acts like eastl::forward when possible. Same as C++11 std::forward. +/// +/// EASTL_FORWARD_INLINE +/// Acts like eastl::forward but is implemented inline instead of a function call. +/// This allows code to be faster in debug builds in particular. +/// +#define EASTL_MOVE(x) eastl::move(x) +#if !defined(EA_COMPILER_NO_DECLTYPE) + #define EASTL_MOVE_INLINE(x) static_cast::type&&>(x) +#elif defined(__GNUC__) + #define EASTL_MOVE_INLINE(x) static_cast::type&&>(x) +#else + #define EASTL_MOVE_INLINE(x) eastl::move(x) +#endif + +#define EASTL_FORWARD(T, x) eastl::forward(x) +#define EASTL_FORWARD_INLINE(T, x) eastl::forward(x) // Need to investigate how to properly make a macro for this. (eastl::is_reference::value ? static_cast(static_cast(x)) : static_cast(x)) + + + + +/// EASTL_MOVE_RANGE +/// Acts like the eastl::move algorithm when possible. Same as C++11 std::move. 
+/// Not to be confused with the single argument move: (typename remove_reference<T>::type&& move(T&& x))
+/// http://en.cppreference.com/w/cpp/algorithm/move
+/// http://en.cppreference.com/w/cpp/algorithm/move_backward
+///
+#define EASTL_MOVE_RANGE(first, last, result) eastl::move(first, last, result)
+#define EASTL_MOVE_BACKWARD_RANGE(first, last, resultEnd) eastl::move_backward(first, last, resultEnd)
+
+
+namespace eastl
+{
+	// forward
+	//
+	// forwards the argument to another function exactly as it was passed to the calling function.
+	// Not to be confused with move, this is specifically for echoing templated argument types
+	// to another function. move is specifically about making a type be an rvalue reference (i.e. movable) type.
+	//
+	// Example usage:
+	//     template <typename T>
+	//     void WrapperFunction(T&& arg)
+	//         { foo(eastl::forward<T>(arg)); }
+	//
+	//     template <typename... Args>
+	//     void WrapperFunction(Args&&... args)
+	//         { foo(eastl::forward<Args>(args)...); }
+	//
+	// See the C++ Standard, section 20.2.3
+	// http://en.cppreference.com/w/cpp/utility/forward
+	//
+	template <typename T>
+	EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type& x) EA_NOEXCEPT
+	{
+		return static_cast<T&&>(x);
+	}
+
+
+	template <typename T>
+	EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type&& x) EA_NOEXCEPT
+	{
+		static_assert(!is_lvalue_reference<T>::value, "forward T isn't lvalue reference");
+		return static_cast<T&&>(x);
+	}
+
+
+	// move
+	//
+	// move obtains an rvalue reference to its argument and converts it to an xvalue.
+	// Returns, by definition: static_cast<typename remove_reference<T>::type&&>(t).
+	// The primary use of this is to pass a move'd type to a function which takes T&&,
+	// and thus select that function instead of (e.g.) a function which takes T or T&.
+	// See the C++ Standard, section 20.2.3
+	// http://en.cppreference.com/w/cpp/utility/move
+	//
+	template <typename T>
+	EA_CPP14_CONSTEXPR typename eastl::remove_reference<T>::type&&
+	move(T&& x) EA_NOEXCEPT
+	{
+		return static_cast<typename eastl::remove_reference<T>::type&&>(x);
+	}
+
+
+	// move_if_noexcept
+	//
+	// Returns T&& if move-constructing T throws no exceptions. Instead returns const T& if
+	// move-constructing T throws exceptions or has no accessible copy constructor.
+	// The purpose of this is to automatically use copy construction instead of move
+	// construction when the move may possibly throw an exception.
+	// See the C++ Standard, section 20.2.3
+	// http://en.cppreference.com/w/cpp/utility/move_if_noexcept
+	//
+	#if EASTL_EXCEPTIONS_ENABLED
+		template <typename T>
+		EA_CPP14_CONSTEXPR typename eastl::conditional<!eastl::is_nothrow_move_constructible<T>::value &&
+		                                               eastl::is_copy_constructible<T>::value, const T&, T&&>::type
+		move_if_noexcept(T& x) EA_NOEXCEPT
+		{
+			return eastl::move(x);
+		}
+	#else
+		template <typename T>
+		EA_CPP14_CONSTEXPR T&&
+		move_if_noexcept(T& x) EA_NOEXCEPT
+		{
+			return eastl::move(x);
+		}
+	#endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/internal/pair_fwd_decls.h b/external/EASTL/include/EASTL/internal/pair_fwd_decls.h
new file mode 100644
index 00000000..a716482d
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/pair_fwd_decls.h
@@ -0,0 +1,16 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_PAIR_FWD_DECLS_H
+#define EASTL_PAIR_FWD_DECLS_H
+
+#include <EABase/eabase.h>
+
+namespace eastl
+{
+	template <typename T1, typename T2>
+	struct pair;
+}
+
+#endif // EASTL_PAIR_FWD_DECLS_H
diff --git a/external/EASTL/include/EASTL/internal/piecewise_construct_t.h b/external/EASTL/include/EASTL/internal/piecewise_construct_t.h
new file mode 100644
index 00000000..d853f0ea
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/piecewise_construct_t.h
@@ -0,0 +1,46 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+#define EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+namespace eastl
+{
+	///////////////////////////////////////////////////////////////////////////////
+	/// piecewise_construct_t
+	///
+	/// http://en.cppreference.com/w/cpp/utility/piecewise_construct_t
+	///
+	struct piecewise_construct_t
+	{
+		explicit piecewise_construct_t() = default;
+	};
+
+
+	///////////////////////////////////////////////////////////////////////////////
+	/// piecewise_construct
+	///
+	/// A tag type used to disambiguate between function overloads that take two tuple arguments.
+	///
+	/// http://en.cppreference.com/w/cpp/utility/piecewise_construct
+	///
+	EA_CONSTEXPR piecewise_construct_t piecewise_construct = eastl::piecewise_construct_t();
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/external/EASTL/include/EASTL/internal/red_black_tree.h b/external/EASTL/include/EASTL/internal/red_black_tree.h
new file mode 100644
index 00000000..21ea0a7b
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/red_black_tree.h
@@ -0,0 +1,2421 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RED_BLACK_TREE_H
+#define EASTL_RED_BLACK_TREE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4512/4626 - 'class' : assignment operator could not be generated
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4512 4626 4530 4571);
+
+
+namespace eastl
+{
+
+	/// EASTL_RBTREE_DEFAULT_NAME
+	///
+	/// Defines a default container name in the absence of a user-provided name.
+	///
+	#ifndef EASTL_RBTREE_DEFAULT_NAME
+		#define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree".
+	#endif
+
+
+	/// EASTL_RBTREE_DEFAULT_ALLOCATOR
+	///
+	#ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR
+		#define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME)
+	#endif
+
+
+	/// EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+	///
+	#ifndef EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+		#define EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR 0
+	#endif
+
+
+	/// RBTreeColor
+	///
+	enum RBTreeColor
+	{
+		kRBTreeColorRed,
+		kRBTreeColorBlack
+	};
+
+
+
+	/// RBTreeSide
+	///
+	enum RBTreeSide
+	{
+		kRBTreeSideLeft,
+		kRBTreeSideRight
+	};
+
+
+
+	/// rbtree_node_base
+	///
+	/// We define a rbtree_node_base separately from rbtree_node (below), because it
+	/// allows us to have non-templated operations, and it makes it so that the
+	/// rbtree anchor node doesn't carry a T with it, which would waste space and
+	/// possibly lead to surprising the user due to extra Ts existing that the user
+	/// didn't explicitly create. The downside to all of this is that it makes debug
+	/// viewing of an rbtree harder, given that the node pointers are of type
+	/// rbtree_node_base and not rbtree_node.
+	///
+	struct rbtree_node_base
+	{
+		typedef rbtree_node_base this_type;
+
+	public:
+		this_type* mpNodeRight;  // Declared first because it is used most often.
+		this_type* mpNodeLeft;
+		this_type* mpNodeParent;
+		char       mColor;       // We only need one bit here, would be nice if we could stuff that bit somewhere else.
+	};
+
+
+	/// rbtree_node
+	///
+	template <typename Value>
+	struct rbtree_node : public rbtree_node_base
+	{
+		Value mValue; // For set and multiset, this is the user's value, for map and multimap, this is a pair of key/value.
+
+		// This type is never constructed, so to avoid an MSVC warning we "delete" the copy constructor.
+		//
+		// Potentially we could provide a constructor that would satisfy the compiler and change the code to use this constructor
+		// instead of constructing mValue in place within an unconstructed rbtree_node.
+		#if defined(_MSC_VER)
+			rbtree_node(const rbtree_node&) = delete;
+		#endif
+	};
+
+
+
+
+	// rbtree_node_base functions
+	//
+	// These are the fundamental functions that we use to maintain the
+	// tree. The bulk of the work of the tree maintenance is done in
+	// these functions.
+	//
+	EASTL_API rbtree_node_base* RBTreeIncrement    (const rbtree_node_base* pNode);
+	EASTL_API rbtree_node_base* RBTreeDecrement    (const rbtree_node_base* pNode);
+	EASTL_API rbtree_node_base* RBTreeGetMinChild  (const rbtree_node_base* pNode);
+	EASTL_API rbtree_node_base* RBTreeGetMaxChild  (const rbtree_node_base* pNode);
+	EASTL_API size_t            RBTreeGetBlackCount(const rbtree_node_base* pNodeTop,
+	                                                const rbtree_node_base* pNodeBottom);
+	EASTL_API void              RBTreeInsert       (rbtree_node_base* pNode,
+	                                                rbtree_node_base* pNodeParent,
+	                                                rbtree_node_base* pNodeAnchor,
+	                                                RBTreeSide insertionSide);
+	EASTL_API void              RBTreeErase        (rbtree_node_base* pNode,
+	                                                rbtree_node_base* pNodeAnchor);
+
+
+
+
+
+
+
+	/// rbtree_iterator
+	///
+	template <typename T, typename Pointer, typename Reference>
+	struct rbtree_iterator
+	{
+		typedef rbtree_iterator<T, Pointer, Reference>  this_type;
+		typedef rbtree_iterator<T, T*, T&>              iterator;
+		typedef rbtree_iterator<T, const T*, const T&>  const_iterator;
+		typedef eastl_size_t                            size_type;     // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type; + typedef T value_type; + typedef rbtree_node_base base_node_type; + typedef rbtree_node node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + private: + base_node_type* mpNode; +#else + public: + node_type* mpNode; +#endif + + public: + rbtree_iterator(); + explicit rbtree_iterator(const base_node_type* pNode); + // Note: this isn't always a copy constructor, iterator is not always equal to this_type + rbtree_iterator(const iterator& x); + // Note: this isn't always a copy assignment operator, iterator is not always equal to this_type + rbtree_iterator& operator=(const iterator& x); + + // Calling these on the end() of a tree invokes undefined behavior. + reference operator*() const; + pointer operator->() const; + + rbtree_iterator& operator++(); + rbtree_iterator operator++(int); + + rbtree_iterator& operator--(); + rbtree_iterator operator--(int); + private: + // This is a temp helper function for the deprecation. + // It should be removed when the deprecation window ends. +#if EA_IS_ENABLED(EASTL_DEPRECATIONS_FOR_2024_APRIL) + base_node_type* toInternalNodeType(base_node_type* node) { return node; } +#else + node_type* toInternalNodeType(base_node_type* node) { return static_cast(node); } +#endif + + template + friend bool operator==(const rbtree_iterator&, const rbtree_iterator&); + + template + friend bool operator!=(const rbtree_iterator&, const rbtree_iterator&); + + template + friend bool operator!=(const rbtree_iterator&, const rbtree_iterator&); + + // rbtree uses mpNode. + template + friend class rbtree; + + // for the "copy" constructor, which uses non-const iterator even in the + // const_iterator case. + friend iterator; + friend const_iterator; + }; // rbtree_iterator + + + /////////////////////////////////////////////////////////////////////////////// + // rb_base_compare_ebo + // + // Utilizes the "empty base-class optimization" to reduce the size of the rbtree + // when its Compare template argument is an empty class. + /////////////////////////////////////////////////////////////////////////////// + + template ::value> + struct rb_base_compare_ebo + { + protected: + rb_base_compare_ebo() : mCompare() {} + rb_base_compare_ebo(const Compare& compare) : mCompare(compare) {} + + Compare& get_compare() { return mCompare; } + const Compare& get_compare() const { return mCompare; } + + template + bool compare(const T& lhs, const T& rhs) + { + return mCompare(lhs, rhs); + } + + template + bool compare(const T& lhs, const T& rhs) const + { + return mCompare(lhs, rhs); + } + + private: + Compare mCompare; + }; + + template + struct rb_base_compare_ebo : private Compare + { + protected: + rb_base_compare_ebo() {} + rb_base_compare_ebo(const Compare& compare) : Compare(compare) {} + + Compare& get_compare() { return *this; } + const Compare& get_compare() const { return *this; } + + template + bool compare(const T& lhs, const T& rhs) + { + return Compare::operator()(lhs, rhs); + } + + template + bool compare(const T& lhs, const T& rhs) const + { + return Compare::operator()(lhs, rhs); + } + }; + + + + /////////////////////////////////////////////////////////////////////////////// + // rb_base + // + // This class allows us to use a generic rbtree as the basis of map, multimap, + // set, and multiset transparently. 
The vital template parameters for this are + // the ExtractKey and the bUniqueKeys parameters. + // + // If the rbtree has a value type of the form pair (i.e. it is a map or + // multimap and not a set or multiset) and a key extraction policy that returns + // the first part of the pair, the rbtree gets a mapped_type typedef. + // If it satisfies those criteria and also has unique keys, then it also gets an + // operator[] (which only map and set have and multimap and multiset don't have). + // + /////////////////////////////////////////////////////////////////////////////// + + + + /// rb_base + /// This specialization is used for 'set'. In this case, Key and Value + /// will be the same as each other and ExtractKey will be eastl::use_self. + /// + template + struct rb_base : public rb_base_compare_ebo + { + typedef ExtractKey extract_key; + + protected: + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This class is used for 'multiset'. + /// In this case, Key and Value will be the same as each + /// other and ExtractKey will be eastl::use_self. + /// + template + struct rb_base : public rb_base_compare_ebo + { + typedef ExtractKey extract_key; + + protected: + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This specialization is used for 'map'. + /// + template + struct rb_base, true, RBTree> : public rb_base_compare_ebo + { + typedef eastl::use_first extract_key; + + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This specialization is used for 'multimap'. + /// + template + struct rb_base, false, RBTree> : public rb_base_compare_ebo + { + typedef eastl::use_first extract_key; + + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rbtree + /// + /// rbtree is the red-black tree basis for the map, multimap, set, and multiset + /// containers. Just about all the work of those containers is done here, and + /// they are merely a shell which sets template policies that govern the code + /// generation for this rbtree. + /// + /// This rbtree implementation is pretty much the same as all other modern + /// rbtree implementations, as the topic is well known and researched. We may + /// choose to implement a "relaxed balancing" option at some point in the + /// future if it is deemed worthwhile. Most rbtree implementations don't do this. + /// + /// The primary rbtree member variable is mAnchor, which is a node_type and + /// acts as the end node. However, like any other node, it has mpNodeLeft, + /// mpNodeRight, and mpNodeParent members. We do the conventional trick of + /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning + /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root + /// node to mpNodeParent. + /// + /// Compare (functor): This is a comparison class which defaults to 'less'. + /// It is a common STL thing which takes two arguments and returns true if + /// the first is less than the second. 
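+	/// A usage sketch: a case-insensitive set of strings could be built by supplying
+	/// a custom Compare (this assumes eastl::string's comparei member; both names are
+	/// illustrative here, not part of rbtree itself):
+	///     struct ci_less {
+	///         bool operator()(const eastl::string& a, const eastl::string& b) const
+	///             { return a.comparei(b) < 0; }
+	///     };
+	///     eastl::set<eastl::string, ci_less> s;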
+ /// + /// ExtractKey (functor): This is a class which gets the key from a stored + /// node. With map and set, the node is a pair, whereas with set and multiset + /// the node is just the value. ExtractKey will be either eastl::use_first (map and multimap) + /// or eastl::use_self (set and multiset). + /// + /// bMutableIterators (bool): true if rbtree::iterator is a mutable + /// iterator, false if iterator and const_iterator are both const iterators. + /// It will be true for map and multimap and false for set and multiset. + /// + /// bUniqueKeys (bool): true if the keys are to be unique, and false if there + /// can be multiple instances of a given key. It will be true for set and map + /// and false for multiset and multimap. + /// + /// To consider: Add an option for relaxed tree balancing. This could result + /// in performance improvements but would require a more complicated implementation. + /// + /////////////////////////////////////////////////////////////////////// + /// find_as + /// In order to support the ability to have a tree of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the tree's key type. See the find_as function + /// for more documentation on this. + /// + template + class rbtree + : public rb_base > + { + public: + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef Key key_type; + typedef Value value_type; + typedef rbtree_node node_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + typedef typename conditional, + rbtree_iterator >::type iterator; + typedef rbtree_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + typedef Allocator allocator_type; + typedef Compare key_compare; + typedef typename conditional, iterator>::type insert_return_type; // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + typedef rbtree this_type; + typedef rb_base base_type; + typedef integral_constant has_unique_keys_type; + typedef typename base_type::extract_key extract_key; + + protected: + using base_type::compare; + using base_type::get_compare; + + public: + rbtree_node_base mAnchor; /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right). + size_type mnSize; /// Stores the count of nodes in the tree (not counting the anchor node). + allocator_type mAllocator; // To do: Use base class optimization to make this go away. 
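+		// To summarize the anchor scheme described above, the invariants for a
+		// non-empty tree are:
+		//     mAnchor.mpNodeParent -> root node of the tree
+		//     mAnchor.mpNodeLeft   -> left-most node, i.e. begin()
+		//     mAnchor.mpNodeRight  -> right-most node, i.e. 'end() - 1' (a.k.a. rbegin())
+		// For an empty tree, mpNodeLeft and mpNodeRight point back at mAnchor itself
+		// and mpNodeParent is NULL (see reset_lose_memory below).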
+ + public: + // ctor/dtor + rbtree(); + rbtree(const allocator_type& allocator); + rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR); + rbtree(const this_type& x); + rbtree(this_type&& x); + rbtree(this_type&& x, const allocator_type& allocator); + + template + rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR); + + ~rbtree(); + + public: + // properties + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + const key_compare& key_comp() const { return get_compare(); } + key_compare& key_comp() { return get_compare(); } + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + public: + // iterators + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + public: + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + + template + insert_return_type emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position, Args&&... args); + + // Standard conversion overload to avoid the overhead of mismatched 'pair' types. + template ::value>::type> + insert_return_type insert(P&& otherValue); + + // Currently limited to value_type instead of P because it collides with insert(InputIterator, InputIterator). + // To allow this to work with templated P we need to implement a compile-time specialization for the + // case that P&& is const_iterator and have that specialization handle insert(InputIterator, InputIterator) + // instead of insert(InputIterator, InputIterator). Curiously, neither libstdc++ nor libc++ + // implement this function either, which suggests they ran into the same problem I did here + // and haven't yet resolved it (at least as of March 2014, GCC 4.8.1). + iterator insert(const_iterator hint, value_type&& value); + + /// map::insert and set::insert return a pair, while multimap::insert and + /// multiset::insert return an iterator. + insert_return_type insert(const value_type& value); + + // C++ standard: inserts value if and only if there is no element with + // key equivalent to the key of t in containers with unique keys; always + // inserts value in containers with equivalent keys. Always returns the + // iterator pointing to the element with key equivalent to the key of value. + // iterator position is a hint pointing to where the insert should start + // to search. However, there is a potential defect/improvement report on this behaviour: + // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html) + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. 
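+		/// A usage sketch (hypothetical loop): when keys arrive in ascending order,
+		/// feeding each result back in as the hint keeps every insertion next to the
+		/// previously inserted node instead of re-searching from the root:
+		///     eastl::set<int> s;
+		///     eastl::set<int>::iterator hint = s.end();
+		///     for(int i = 0; i < 100; ++i)
+		///         hint = s.insert(hint, i);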
+ iterator insert(const_iterator position, const value_type& value); + + void insert(std::initializer_list ilist); + + template + void insert(InputIterator first, InputIterator last); + + // TODO(rparolin): + // insert_return_type insert(node_type&& nh); + // iterator insert(const_iterator hint, node_type&& nh); + + template pair insert_or_assign(const key_type& k, M&& obj); + template pair insert_or_assign(key_type&& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + // For some reason, multiple STL versions make a specialization + // for erasing an array of key_types. I'm pretty sure we don't + // need this, but just to be safe we will follow suit. + // The implementation is trivial. Returns void because the values + // could well be randomly distributed throughout the tree and thus + // a return value would be nearly meaningless. + void erase(const key_type* first, const key_type* last); + + void clear(); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + // missing transparent key support: + // template + // iterator find(const K& key); + // template + // const_iterator find(const K& key) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the tree's value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. + /// + /// Example usage (note that the compare uses string as first type and char* as second): + /// set strings; + /// strings.find_as("hello", less<>()); + /// + template iterator find_as(const U& u, Compare2 compare2); + template const_iterator find_as(const U& u, Compare2 compare2) const; + + iterator lower_bound(const key_type& key); + const_iterator lower_bound(const key_type& key) const; + + // missing transparent key support: + // template + // iterator lower_bound(const K& key); + // template + // const_iterator lower_bound(const K& key) const; + + iterator upper_bound(const key_type& key); + const_iterator upper_bound(const key_type& key) const; + + // missing transparent key support: + // template + // iterator upper_bound(const K& key); + // template + // const_iterator upper_bound(const K& key) const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + node_type* DoAllocateNode(); + void DoFreeNode(node_type* pNode); + + node_type* DoCreateNodeFromKey(const key_type& key); + + template + node_type* DoCreateNode(Args&&... 
args); + node_type* DoCreateNode(const value_type& value); + node_type* DoCreateNode(value_type&& value); + node_type* DoCreateNode(const node_type* pNodeSource, rbtree_node_base* pNodeParent); + + rbtree_node_base* DoCopySubtree(const node_type* pNodeSource, rbtree_node_base* pNodeDest); + void DoNukeSubtree(rbtree_node_base* pNode); + + template + eastl::pair DoInsertValue(true_type, Args&&... args); + + template + iterator DoInsertValue(false_type, Args&&... args); + + eastl::pair DoInsertValue(true_type, value_type&& value); + iterator DoInsertValue(false_type, value_type&& value); + + template + iterator DoInsertValueImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args); + iterator DoInsertValueImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew); + + eastl::pair DoInsertKey(true_type, const key_type& key); + iterator DoInsertKey(false_type, const key_type& key); + + template + iterator DoInsertValueHint(true_type, const_iterator position, Args&&... args); + + template + iterator DoInsertValueHint(false_type, const_iterator position, Args&&... args); + + iterator DoInsertValueHint(true_type, const_iterator position, value_type&& value); + iterator DoInsertValueHint(false_type, const_iterator position, value_type&& value); + + iterator DoInsertKey(true_type, const_iterator position, const key_type& key); // By design we return iterator and not a pair. + iterator DoInsertKey(false_type, const_iterator position, const key_type& key); + iterator DoInsertKeyImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key); + + rbtree_node_base* DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key); + rbtree_node_base* DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key); + + rbtree_node_base* DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key); + rbtree_node_base* DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key); + + }; // rbtree + + + + + + /////////////////////////////////////////////////////////////////////// + // rbtree_node_base functions + /////////////////////////////////////////////////////////////////////// + + EASTL_API inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase) + { + while(pNodeBase->mpNodeLeft) + pNodeBase = pNodeBase->mpNodeLeft; + return const_cast(pNodeBase); + } + + EASTL_API inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase) + { + while(pNodeBase->mpNodeRight) + pNodeBase = pNodeBase->mpNodeRight; + return const_cast(pNodeBase); + } + + // The rest of the functions are non-trivial and are found in + // the corresponding .cpp file to this file. 
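+	// A traversal sketch (illustrative; 'tree' is a hypothetical rbtree instance):
+	// given the anchor node described above, where mpNodeLeft is the left-most node
+	// and the anchor itself acts as end(), an in-order walk needs no recursion:
+	//
+	//     rbtree_node_base* pAnchor = &tree.mAnchor;
+	//     rbtree_node_base* p = pAnchor->mpNodeLeft;   // begin(): smallest key
+	//     while(p != pAnchor)                          // the anchor acts as end()
+	//     {
+	//         // visit static_cast<rbtree_node<T>*>(p)->mValue here
+	//         p = RBTreeIncrement(p);
+	//     }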
+ + + + /////////////////////////////////////////////////////////////////////// + // rbtree_iterator functions + /////////////////////////////////////////////////////////////////////// + + template + rbtree_iterator::rbtree_iterator() + : mpNode(NULL) { } + + + template + rbtree_iterator::rbtree_iterator(const base_node_type* pNode) + : mpNode(toInternalNodeType(const_cast(pNode))) { } + + + template + rbtree_iterator::rbtree_iterator(const iterator& x) + : mpNode(x.mpNode) { } + + template + typename rbtree_iterator::this_type& + rbtree_iterator::operator=(const iterator& x) + { + mpNode = x.mpNode; + return *this; + } + + template + typename rbtree_iterator::reference + rbtree_iterator::operator*() const + { + return static_cast(mpNode)->mValue; + } + + + template + typename rbtree_iterator::pointer + rbtree_iterator::operator->() const + { + return &static_cast(mpNode)->mValue; + } + + + template + typename rbtree_iterator::this_type& + rbtree_iterator::operator++() + { + mpNode = toInternalNodeType(RBTreeIncrement(mpNode)); + return *this; + } + + + template + typename rbtree_iterator::this_type + rbtree_iterator::operator++(int) + { + this_type temp(*this); + mpNode = toInternalNodeType(RBTreeIncrement(mpNode)); + return temp; + } + + + template + typename rbtree_iterator::this_type& + rbtree_iterator::operator--() + { + mpNode = toInternalNodeType(RBTreeDecrement(mpNode)); + return *this; + } + + + template + typename rbtree_iterator::this_type + rbtree_iterator::operator--(int) + { + this_type temp(*this); + mpNode = toInternalNodeType(RBTreeDecrement(mpNode)); + return temp; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + + + /////////////////////////////////////////////////////////////////////// + // rbtree functions + /////////////////////////////////////////////////////////////////////// + + template + inline rbtree::rbtree() + : mAnchor(), + mnSize(0), + mAllocator(EASTL_RBTREE_DEFAULT_NAME) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const allocator_type& allocator) + : mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const C& compare, const allocator_type& allocator) + : base_type(compare), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const this_type& x) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(x.mAllocator) + { + reset_lose_memory(); + + if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node. 
+ { + mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, &mAnchor); + mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent); + mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent); + mnSize = x.mnSize; + } + } + + + template + inline rbtree::rbtree(this_type&& x) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(x.mAllocator) + { + reset_lose_memory(); + swap(x); + } + + template + inline rbtree::rbtree(this_type&& x, const allocator_type& allocator) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator. + } + + + template + template + inline rbtree::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator) + : base_type(compare), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first) + insert(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + throw; + } + #endif + } + + + template + inline rbtree::~rbtree() + { + // Erase the entire tree. DoNukeSubtree is not a + // conventional erase function, as it does no rebalancing. + DoNukeSubtree((node_type*)mAnchor.mpNodeParent); + } + + + template + inline const typename rbtree::allocator_type& + rbtree::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + template + inline typename rbtree::allocator_type& + rbtree::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + template + inline void rbtree::set_allocator(const allocator_type& allocator) + { + mAllocator = allocator; + } + + + template + inline typename rbtree::size_type + rbtree::size() const EA_NOEXCEPT + { return mnSize; } + + + template + inline bool rbtree::empty() const EA_NOEXCEPT + { return (mnSize == 0); } + + + template + inline typename rbtree::iterator + rbtree::begin() EA_NOEXCEPT + { return iterator(mAnchor.mpNodeLeft); } + + + template + inline typename rbtree::const_iterator + rbtree::begin() const EA_NOEXCEPT + { return const_iterator(mAnchor.mpNodeLeft); } + + + template + inline typename rbtree::const_iterator + rbtree::cbegin() const EA_NOEXCEPT + { return const_iterator(mAnchor.mpNodeLeft); } + + + template + inline typename rbtree::iterator + rbtree::end() EA_NOEXCEPT + { return iterator(&mAnchor); } + + + template + inline typename rbtree::const_iterator + rbtree::end() const EA_NOEXCEPT + { return const_iterator(&mAnchor); } + + + template + inline typename rbtree::const_iterator + rbtree::cend() const EA_NOEXCEPT + { return const_iterator(&mAnchor); } + + + template + inline typename rbtree::reverse_iterator + rbtree::rbegin() EA_NOEXCEPT + { return reverse_iterator(end()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::rbegin() const EA_NOEXCEPT + { return const_reverse_iterator(end()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::crbegin() const EA_NOEXCEPT + { return const_reverse_iterator(end()); } + + + template + inline typename rbtree::reverse_iterator + rbtree::rend() EA_NOEXCEPT + { return reverse_iterator(begin()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::rend() const EA_NOEXCEPT + { return const_reverse_iterator(begin()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::crend() const EA_NOEXCEPT + { return 
const_reverse_iterator(begin()); } + + + template + inline typename rbtree::this_type& + rbtree::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + mAllocator = x.mAllocator; + #endif + + get_compare() = x.get_compare(); + + if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node. + { + mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, &mAnchor); + mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent); + mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent); + mnSize = x.mnSize; + } + } + return *this; + } + + template + inline typename rbtree::this_type& + rbtree::operator=(this_type&& x) + { + if(this != &x) + { + clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + template + inline typename rbtree::this_type& + rbtree::operator=(std::initializer_list ilist) + { + // The simplest means of doing this is to clear and insert. There probably isn't a generic + // solution that's any more efficient without having prior knowledge of the ilist contents. + clear(); + + for(typename std::initializer_list::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it) + DoInsertValue(has_unique_keys_type(), eastl::move(*it)); + + return *this; + } + + + template + void rbtree::swap(this_type& x) + { + #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + if(mAllocator == x.mAllocator) // If allocators are equivalent... + #endif + { + // Most of our members can be exchaged by a basic swap: + // We leave mAllocator as-is. + eastl::swap(mnSize, x.mnSize); + eastl::swap(get_compare(), x.get_compare()); + #if !EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + eastl::swap(mAllocator, x.mAllocator); + #endif + + + // However, because our anchor node is a part of our class instance and not + // dynamically allocated, we can't do a swap of it but must do a more elaborate + // procedure. This is the downside to having the mAnchor be like this, but + // otherwise we consider it a good idea to avoid allocating memory for a + // nominal container instance. + + // We optimize for the expected most common case: both pointers being non-null. + if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null... + { + eastl::swap(mAnchor.mpNodeRight, x.mAnchor.mpNodeRight); + eastl::swap(mAnchor.mpNodeLeft, x.mAnchor.mpNodeLeft); + eastl::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent); + + // We need to fix up the anchors to point to themselves (we can't just swap them). + mAnchor.mpNodeParent->mpNodeParent = &mAnchor; + x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor; + } + else if(mAnchor.mpNodeParent) + { + x.mAnchor.mpNodeRight = mAnchor.mpNodeRight; + x.mAnchor.mpNodeLeft = mAnchor.mpNodeLeft; + x.mAnchor.mpNodeParent = mAnchor.mpNodeParent; + x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor; + + // We need to fix up our anchor to point it itself (we can't have it swap with x). 
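+				// The assignments below put *this back into the canonical empty state
+				// (the same state reset_lose_memory() establishes): both child pointers
+				// loop back to the anchor and there is no root.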
+ mAnchor.mpNodeRight = &mAnchor; + mAnchor.mpNodeLeft = &mAnchor; + mAnchor.mpNodeParent = NULL; + } + else if(x.mAnchor.mpNodeParent) + { + mAnchor.mpNodeRight = x.mAnchor.mpNodeRight; + mAnchor.mpNodeLeft = x.mAnchor.mpNodeLeft; + mAnchor.mpNodeParent = x.mAnchor.mpNodeParent; + mAnchor.mpNodeParent->mpNodeParent = &mAnchor; + + // We need to fix up x's anchor to point it itself (we can't have it swap with us). + x.mAnchor.mpNodeRight = &x.mAnchor; + x.mAnchor.mpNodeLeft = &x.mAnchor; + x.mAnchor.mpNodeParent = NULL; + } // Else both are NULL and there is nothing to do. + } + #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + else + { + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + #endif + } + + + template + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::emplace(Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); + } + + template + template + typename rbtree::iterator + rbtree::emplace_hint(const_iterator position, Args&&... args) + { + return DoInsertValueHint(has_unique_keys_type(), position, eastl::forward(args)...); + } + + template + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::insert(P&& otherValue) + { + // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference. + return emplace(eastl::forward
(otherValue)); + } + + + template + inline typename rbtree::iterator + rbtree::insert(const_iterator position, value_type&& value) + { + return DoInsertValueHint(has_unique_keys_type(), position, eastl::move(value)); + } + + + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::insert(const value_type& value) + { + return DoInsertValue(has_unique_keys_type(), value); + } + + + template + typename rbtree::iterator + rbtree::insert(const_iterator position, const value_type& value) + { + return DoInsertValueHint(has_unique_keys_type(), position, value); + } + + + template + template + eastl::pair::iterator, bool> + rbtree::insert_or_assign(const key_type& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + eastl::pair::iterator, bool> + rbtree::insert_or_assign(key_type&& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + typename rbtree::iterator + rbtree::insert_or_assign(const_iterator hint, const key_type& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return iter; + } + } + + template + template + typename rbtree::iterator + rbtree::insert_or_assign(const_iterator hint, key_type&& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return iter; + } + } + + template + rbtree_node_base* + rbtree::DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key) + { + // This code is essentially a slightly modified copy of the the rbtree::insert + // function whereby this version takes a key and not a full value_type. + extract_key extractKey; + + rbtree_node_base* pCurrent = mAnchor.mpNodeParent; // Start with the root node. + rbtree_node_base* pLowerBound = &mAnchor; // Set it to the container end for now. + rbtree_node_base* pParent; // This will be where we insert the new node. + + bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front. + + // Find insertion position of the value. This will either be a position which + // already contains the value, a position which is greater than the value or + // end(), which we treat like a position which is greater than the value. + while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree. + { + bValueLessThanNode = compare(key, extractKey(static_cast(pCurrent)->mValue)); + pLowerBound = pCurrent; + + if(bValueLessThanNode) + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(static_cast(pCurrent)->mValue), key)); // Validate that the compare function is sane. 
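+                // This validation (like the others below) catches comparators that are not a
+                // strict weak ordering: having just seen compare(key, node) be true,
+                // compare(node, key) must be false. A comparator written with operator<=
+                // instead of operator<, for example, would trip this check.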
+ pCurrent = pCurrent->mpNodeLeft; + } + else + pCurrent = pCurrent->mpNodeRight; + } + + pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below. + + if(bValueLessThanNode) // If we ended up on the left side of the last parent node... + { + if(EASTL_LIKELY(pLowerBound != mAnchor.mpNodeLeft)) // If the tree was empty or if we otherwise need to insert at the very front of the tree... + { + // At this point, pLowerBound points to a node which is > than value. + // Move it back by one, so that it points to a node which is <= value. + pLowerBound = RBTreeDecrement(pLowerBound); + } + else + { + canInsert = true; + return pLowerBound; + } + } + + // Since here we require values to be unique, we will do nothing if the value already exists. + node_type* const pLowerBoundFullNode = static_cast(pLowerBound); + if(compare(extractKey(pLowerBoundFullNode->mValue), key)) // If the node is < the value (i.e. if value is >= the node)... + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pLowerBoundFullNode->mValue))); // Validate that the compare function is sane. + canInsert = true; + return pParent; + } + + // The item already exists (as found by the compare directly above), so return false. + canInsert = false; + return pLowerBound; + } + + + template + rbtree_node_base* + rbtree::DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key) + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). + rbtree_node_base* pCurrent = mAnchor.mpNodeParent; // Start with the root node. + rbtree_node_base* pRangeEnd = &mAnchor; // Set it to the container end for now. + extract_key extractKey; + + while(pCurrent) + { + pRangeEnd = pCurrent; + + if(compare(key, extractKey(static_cast(pCurrent)->mValue))) + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(static_cast(pCurrent)->mValue), key)); // Validate that the compare function is sane. + pCurrent = pCurrent->mpNodeLeft; + } + else + pCurrent = pCurrent->mpNodeRight; + } + + return pRangeEnd; + } + + + template + eastl::pair::iterator, bool> + rbtree::DoInsertValue(true_type, value_type&& value) + { + extract_key extractKey; + key_type key(extractKey(value)); + bool canInsert; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + const iterator itResult(DoInsertValueImpl(pPosition, false, key, eastl::move(value))); + return pair(itResult, true); + } + + return pair(iterator(pPosition), false); + } + + + template + typename rbtree::iterator + rbtree::DoInsertValue(false_type, value_type&& value) + { + extract_key extractKey; + key_type key(extractKey(value)); + rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + + return DoInsertValueImpl(pPosition, false, key, eastl::move(value)); + } + + + template + template + eastl::pair::iterator, bool> + rbtree::DoInsertValue(true_type, Args&&... args) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // Note that we return a pair and not an iterator. This is because the C++ standard for map + // and set is to return a pair and not just an iterator. + + node_type* pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. 
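+		// With emplace the node has to be constructed before the tree can be searched,
+		// because the key only exists inside the freshly constructed mValue. If the key
+		// turns out to be a duplicate, the node is released again via DoFreeNode below.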
+ const key_type& key = extract_key{}(pNodeNew->mValue); + + bool canInsert; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + iterator itResult(DoInsertValueImpl(pPosition, false, key, pNodeNew)); + return pair(itResult, true); + } + + DoFreeNode(pNodeNew); + return pair(iterator(pPosition), false); + } + + + template + template + typename rbtree::iterator + rbtree::DoInsertValue(false_type, Args&&... args) // false_type means keys are not unique. + { + // We have a problem here if sizeof(value_type) is too big for the stack. We may want to consider having a specialization for large value_types. + // To do: Change this so that we call DoCreateNode(eastl::forward(args)...) here and use the value from the resulting pNode to get the + // key, and make DoInsertValueImpl take that node as an argument. That way there is no value created on the stack. + + node_type* const pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + const key_type& key = extract_key{}(pNodeNew->mValue); + + rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + + return DoInsertValueImpl(pPosition, false, key, pNodeNew); + } + + + template + template + typename rbtree::iterator + rbtree::DoInsertValueImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args) + { + node_type* const pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + return DoInsertValueImpl(pNodeParent, bForceToLeft, key, pNodeNew); + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew) + { + EASTL_ASSERT_MSG(pNodeNew != nullptr, "node to insert to the rbtree must not be null"); + + RBTreeSide side; + extract_key extractKey; + + // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal. + // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report + // suggests that we should use the insert hint position to force an ordering. So that's what we do. + if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(static_cast(pNodeParent)->mValue))) + side = kRBTreeSideLeft; + else + side = kRBTreeSideRight; + + RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side); + mnSize++; + + return iterator(pNodeNew); + } + + + template + eastl::pair::iterator, bool> + rbtree::DoInsertKey(true_type, const key_type& key) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // Note that we return a pair and not an iterator. This is because the C++ standard for map + // and set is to return a pair and not just an iterator. + bool canInsert; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + const iterator itResult(DoInsertKeyImpl(pPosition, false, key)); + return pair(itResult, true); + } + + return pair(iterator(pPosition), false); + } + + + template + typename rbtree::iterator + rbtree::DoInsertKey(false_type, const key_type& key) // false_type means keys are not unique. 
+ { + rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + return DoInsertKeyImpl(pPosition, false, key); + } + + + + template + rbtree_node_base* + rbtree::DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key) + { + extract_key extractKey; + + if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position... + { + iterator itNext(position.mpNode); + ++itNext; + + // To consider: Change this so that 'position' specifies the position after + // where the insertion goes and not the position before where the insertion goes. + // Doing so would make this more in line with user expectations and with LWG #233. + const bool bPositionLessThanValue = compare(extractKey(*position), key); + + if(bPositionLessThanValue) // If (value > *position)... + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(*position))); // Validate that the compare function is sane. + + const bool bValueLessThanNext = compare(key, extractKey(*itNext)); + + if(bValueLessThanNext) // If value < *itNext... + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(*itNext), key)); // Validate that the compare function is sane. + + if(position.mpNode->mpNodeRight) + { + bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position'). + return itNext.mpNode; + } + + bForceToLeft = false; + return position.mpNode; + } + } + + bForceToLeft = false; + return nullptr; // The above specified hint was not useful, then we do a regular insertion. + } + + if(mnSize && compare(extractKey(static_cast(mAnchor.mpNodeRight)->mValue), key)) + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(static_cast(mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane. + bForceToLeft = false; + return mAnchor.mpNodeRight; + } + + bForceToLeft = false; + return NULL; // The caller can do a default insert. + } + + + template + rbtree_node_base* + rbtree::DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key) + { + extract_key extractKey; + + if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position... + { + iterator itNext(position.mpNode); + ++itNext; + + // To consider: Change this so that 'position' specifies the position after + // where the insertion goes and not the position before where the insertion goes. + // Doing so would make this more in line with user expectations and with LWG #233. + if(!compare(key, extractKey(*position)) && // If value >= *position && + !compare(extractKey(*itNext), key)) // if value <= *itNext... + { + if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()] + { + bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position'). + return itNext.mpNode; + } + + bForceToLeft = false; + return position.mpNode; + } + + bForceToLeft = false; + return nullptr; // The above specified hint was not useful, then we do a regular insertion. + } + + // This pathway shouldn't be commonly executed, as the user shouldn't be calling + // this hinted version of insert if the user isn't providing a useful hint. + if(mnSize && !compare(key, extractKey(static_cast(mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node... 
+ { + bForceToLeft =false; + return mAnchor.mpNodeRight; + } + + bForceToLeft = false; + return nullptr; + } + + template + template + typename rbtree::iterator + rbtree::DoInsertValueHint(true_type, const_iterator position, Args&&... args) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + + node_type* pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + const key_type& key(extract_key{}(pNodeNew->mValue)); + + bool bForceToLeft; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key); + + if (!pPosition) + { + bool canInsert; + pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if (!canInsert) + { + DoFreeNode(pNodeNew); + return iterator(pPosition); + } + + bForceToLeft = false; + } + + return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew); + } + + + template + template + typename rbtree::iterator + rbtree::DoInsertValueHint(false_type, const_iterator position, Args&&... args) // false_type means keys are not unique. + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + + node_type* pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + const key_type& key(extract_key{}(pNodeNew->mValue)); + + bool bForceToLeft; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key); + + if (!pPosition) + { + pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + bForceToLeft = false; + } + + return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew); + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueHint(true_type, const_iterator position, value_type&& value) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + + extract_key extractKey; + key_type key(extractKey(value)); + bool bForceToLeft; + rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key); + + if(pPosition) + return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value)); + else + return DoInsertValue(has_unique_keys_type(), eastl::move(value)).first; + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueHint(false_type, const_iterator position, value_type&& value) // false_type means keys are not unique. + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. 
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(true_type, const_iterator position, value_type&& value) // true_type means keys are unique.
+    {
+        // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+        //
+        // We follow the same approach as SGI STL/STLPort and use the position as
+        // a forced insertion position for the value when possible.
+
+        extract_key extractKey;
+        key_type    key(extractKey(value));
+        bool        bForceToLeft;
+        rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+        if(pPosition)
+            return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+        else
+            return DoInsertValue(has_unique_keys_type(), eastl::move(value)).first;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(false_type, const_iterator position, value_type&& value) // false_type means keys are not unique.
+    {
+        // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+        //
+        // We follow the same approach as SGI STL/STLPort and use the position as
+        // a forced insertion position for the value when possible.
+        extract_key extractKey;
+        key_type    key(extractKey(value));
+        bool        bForceToLeft;
+        rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+        if(pPosition)
+            return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+        else
+            return DoInsertValue(has_unique_keys_type(), eastl::move(value));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(true_type, const_iterator position, const key_type& key) // true_type means keys are unique.
+    {
+        bool bForceToLeft;
+        rbtree_node_base* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+        if(pPosition)
+            return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+        else
+            return DoInsertKey(has_unique_keys_type(), key).first;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(false_type, const_iterator position, const key_type& key) // false_type means keys are not unique.
+    {
+        // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+        //
+        // We follow the same approach as SGI STL/STLPort and use the position as
+        // a forced insertion position for the value when possible.
+        bool bForceToLeft;
+        rbtree_node_base* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+        if(pPosition)
+            return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+        else
+            return DoInsertKey(has_unique_keys_type(), key); // We are empty or we are inserting at the end.
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertKeyImpl(rbtree_node_base* pNodeParent, bool bForceToLeft, const key_type& key)
+    {
+        RBTreeSide  side;
+        extract_key extractKey;
+
+        // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+        // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+        // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+        if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(static_cast<node_type*>(pNodeParent)->mValue)))
+            side = kRBTreeSideLeft;
+        else
+            side = kRBTreeSideRight;
+
+        node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+        RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+        mnSize++;
+
+        return iterator(pNodeNew);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    void rbtree<K, V, C, A, E, bM, bU>::insert(std::initializer_list<value_type> ilist)
+    {
+        for(typename std::initializer_list<value_type>::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it)
+            DoInsertValue(has_unique_keys_type(), eastl::move(*it));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename InputIterator>
+    void rbtree<K, V, C, A, E, bM, bU>::insert(InputIterator first, InputIterator last)
+    {
+        for( ; first != last; ++first)
+            DoInsertValue(has_unique_keys_type(), *first); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense.
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::clear()
+    {
+        // Erase the entire tree. DoNukeSubtree is not a
+        // conventional erase function, as it does no rebalancing.
+        DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+        reset_lose_memory();
+    }
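+
+    // Note the division of labor: clear() above destroys and frees every node,
+    // while reset_lose_memory() below simply abandons them. A sketch of the
+    // intended pattern (the allocator name is hypothetical):
+    //     eastl::set<int, eastl::less<int>, ScratchArenaAllocator> s(arenaAllocator);
+    //     /* ... build and use s ... */
+    //     s.reset_lose_memory(); // O(1); the arena's memory is released wholesale elsewhere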
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::reset_lose_memory()
+    {
+        // The reset_lose_memory function is a special extension function which unilaterally
+        // resets the container to an empty state without freeing the memory of
+        // the contained objects. This is useful for very quickly tearing down a
+        // container built into scratch memory.
+        mAnchor.mpNodeRight  = &mAnchor;
+        mAnchor.mpNodeLeft   = &mAnchor;
+        mAnchor.mpNodeParent = NULL;
+        mAnchor.mColor       = kRBTreeColorRed;
+        mnSize               = 0;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator position)
+    {
+        const iterator iErase(position.mpNode);
+        --mnSize; // Interleave this between the two uses of 'position' below. We expect no exceptions to occur during the code below.
+        ++position;
+        RBTreeErase(iErase.mpNode, &mAnchor);
+        DoFreeNode(static_cast<node_type*>(iErase.mpNode));
+        return iterator(position.mpNode);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator first, const_iterator last)
+    {
+        // We expect that if the user means to clear the container, they will call clear.
+        if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin) or (last != end)...
+        {
+            // Basic implementation:
+            while(first != last)
+                first = erase(first);
+            return iterator(first.mpNode);
+
+            // Inlined implementation:
+            //size_type n = 0;
+            //while(first != last)
+            //{
+            //    const iterator itErase(first);
+            //    ++n;
+            //    ++first;
+            //    RBTreeErase(itErase.mpNode, &mAnchor);
+            //    DoFreeNode(itErase.mpNode);
+            //}
+            //mnSize -= n;
+            //return first;
+        }
+
+        clear();
+        return iterator(&mAnchor); // Same as: return end();
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator position)
+    {
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator first, const_reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::erase(const key_type* first, const key_type* last)
+    {
+        // We have no choice but to run a loop like this, as the first/last range could
+        // have values that are discontiguously located in the tree. And some may not
+        // even be in the tree.
+        while(first != last)
+            erase(*first++);
+    }
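+
+    // Example use of the pointer-range erase above (a sketch; the values are illustrative):
+    //     const int keysToRemove[] = { 3, 7, 11 };
+    //     tree.erase(keysToRemove, keysToRemove + 3); // erases each listed key that is present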
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key)
+    {
+        // To consider: Implement this instead via calling lower_bound and
+        // inspecting the result. The following is an implementation of this:
+        //     const iterator it(lower_bound(key));
+        //     return ((it.mpNode == &mAnchor) || compare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it;
+        // We don't currently implement the above because in practice people tend to call
+        // find a lot with trees, but very uncommonly call lower_bound.
+        extract_key extractKey;
+
+        rbtree_node_base* pCurrent  = mAnchor.mpNodeParent; // Start with the root node.
+        rbtree_node_base* pRangeEnd = &mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!compare(extractKey(static_cast<node_type*>(pCurrent)->mValue), key))) // If pCurrent is >= key...
+            {
+                pRangeEnd = pCurrent;
+                pCurrent  = pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!compare(key, extractKey(static_cast<node_type*>(pCurrent)->mValue))); // Validate that the compare function is sane.
+                pCurrent = pCurrent->mpNodeRight;
+            }
+        }
+
+        if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare(key, extractKey(static_cast<node_type*>(pRangeEnd)->mValue))))
+            return iterator(pRangeEnd);
+        return iterator(&mAnchor);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key) const
+    {
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->find(key));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename U, typename Compare2>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2)
+    {
+        extract_key extractKey;
+
+        rbtree_node_base* pCurrent  = mAnchor.mpNodeParent; // Start with the root node.
+        rbtree_node_base* pRangeEnd = &mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!compare2(extractKey(static_cast<node_type*>(pCurrent)->mValue), u))) // If pCurrent is >= u...
+            {
+                pRangeEnd = pCurrent;
+                pCurrent  = pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(static_cast<node_type*>(pCurrent)->mValue))); // Validate that the compare function is sane.
+                pCurrent = pCurrent->mpNodeRight;
+            }
+        }
+
+        if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(static_cast<node_type*>(pRangeEnd)->mValue))))
+            return iterator(pRangeEnd);
+        return iterator(&mAnchor);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename U, typename Compare2>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2) const
+    {
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->find_as(u, compare2));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key)
+    {
+        extract_key extractKey;
+
+        rbtree_node_base* pCurrent  = mAnchor.mpNodeParent; // Start with the root node.
+        rbtree_node_base* pRangeEnd = &mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!compare(extractKey(static_cast<node_type*>(pCurrent)->mValue), key))) // If pCurrent is >= key...
+            {
+                pRangeEnd = pCurrent;
+                pCurrent  = pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!compare(key, extractKey(static_cast<node_type*>(pCurrent)->mValue))); // Validate that the compare function is sane.
+                pCurrent = pCurrent->mpNodeRight;
+            }
+        }
+
+        return iterator(pRangeEnd);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key) const
+    {
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->lower_bound(key));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key)
+    {
+        extract_key extractKey;
+
+        rbtree_node_base* pCurrent  = mAnchor.mpNodeParent; // Start with the root node.
+        rbtree_node_base* pRangeEnd = &mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(compare(key, extractKey(static_cast<node_type*>(pCurrent)->mValue)))) // If key is < pCurrent...
+            {
+                EASTL_VALIDATE_COMPARE(!compare(extractKey(static_cast<node_type*>(pCurrent)->mValue), key)); // Validate that the compare function is sane.
+                pRangeEnd = pCurrent;
+                pCurrent  = pCurrent->mpNodeLeft;
+            }
+            else
+                pCurrent = pCurrent->mpNodeRight;
+        }
+
+        return iterator(pRangeEnd);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key) const
+    {
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->upper_bound(key));
+    }
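+
+    // find_as above allows heterogeneous lookup without constructing a key_type.
+    // Its Compare2 argument must be a less-style ordering predicate callable with
+    // both argument orders, e.g. (a sketch; the functor and map are illustrative):
+    //     struct LessStrCStr {
+    //         bool operator()(const eastl::string& a, const char* b) const { return a.compare(b) < 0; }
+    //         bool operator()(const char* a, const eastl::string& b) const { return b.compare(a) > 0; }
+    //     };
+    //     eastl::map<eastl::string, int> m;
+    //     m.find_as("some_key", LessStrCStr());
+    // lower_bound/upper_bound above bracket the run of elements equal to a key, so
+    // eastl::distance(m.lower_bound(k), m.upper_bound(k)) counts the matches.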
+
+
+    // To do: Move this validate function entirely to a template-less implementation.
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    bool rbtree<K, V, C, A, E, bM, bU>::validate() const
+    {
+        // Red-black trees have the following canonical properties which we validate here:
+        //    1 Every node is either red or black.
+        //    2 Every leaf (NULL) is black by definition. Any number of black nodes may appear in a sequence.
+        //    3 If a node is red, then both its children are black. Thus, on any path from
+        //      the root to a leaf, red nodes must not be adjacent.
+        //    4 Every simple path from a node to a descendant leaf contains the same number of black nodes.
+        //    5 The mnSize member of the tree must equal the number of nodes in the tree.
+        //    6 The tree is sorted as per a conventional binary tree.
+        //    7 The comparison function is sane; it obeys strict weak ordering. If compare(a,b) is true, then compare(b,a) must be false. Both cannot be true.
+
+        extract_key extractKey;
+
+        if(mnSize)
+        {
+            // Verify basic integrity.
+            //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight))
+            //    return false; // Fix this for case of empty tree.
+
+            if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent))
+                return false;
+
+            if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent))
+                return false;
+
+            const size_t nBlackCount   = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft);
+            size_type    nIteratedSize = 0;
+
+            for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize)
+            {
+                const node_type* const pNode      = (const node_type*)it.mpNode;
+                const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight;
+                const node_type* const pNodeLeft  = (const node_type*)pNode->mpNodeLeft;
+
+                // Verify #7 above.
+                if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane.
+                    return false;
+
+                // Verify #7 above.
+                if(pNodeLeft && compare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane.
+                    return false;
+
+                // Verify item #1 above.
+                if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack))
+                    return false;
+
+                // Verify item #3 above.
+                if(pNode->mColor == kRBTreeColorRed)
+                {
+                    if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) ||
+                       (pNodeLeft  && (pNodeLeft->mColor  == kRBTreeColorRed)))
+                        return false;
+                }
+
+                // Verify item #6 above.
+                if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)))
+                    return false;
+
+                if(pNodeLeft && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue)))
+                    return false;
+
+                if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree...
+                {
+                    // Verify item #4 above.
+                    if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount)
+                        return false;
+                }
+            }
+
+            // Verify item #5 above.
+            if(nIteratedSize != mnSize)
+                return false;
+
+            return true;
+        }
+        else
+        {
+            if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor))
+                return false;
+        }
+
+        return true;
+    }
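+
+    // validate() above is intended for debug assertions. A representative use
+    // (a sketch; EASTL_ASSERT is the library's assertion macro):
+    //     eastl::set<int> s;
+    //     /* ... mutate s ... */
+    //     EASTL_ASSERT(s.validate()); // full O(n) structural and ordering check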
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline int rbtree<K, V, C, A, E, bM, bU>::validate_iterator(const_iterator i) const
+    {
+        // To do: Come up with a more efficient mechanism of doing this.
+
+        for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+        {
+            if(temp == i)
+                return (isf_valid | isf_current | isf_can_dereference);
+        }
+
+        if(i == end())
+            return (isf_valid | isf_current);
+
+        return isf_none;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoAllocateNode()
+    {
+        auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+        EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::DoFreeNode(node_type* pNode)
+    {
+        pNode->~node_type();
+        EASTLFree(mAllocator, pNode, sizeof(node_type));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNodeFromKey(const key_type& key)
+    {
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new (eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const value_type& value)
+    {
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new(eastl::addressof(pNode->mValue)) value_type(value);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(value_type&& value)
+    {
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value));
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
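+
+    // The node factories above (and the variadic one below) are what back map/set
+    // emplace. A user-level sketch (assumes <EASTL/map.h>):
+    //     eastl::map<int, eastl::string> m;
+    //     m.emplace(1, "one"); // arguments forwarded through DoCreateNode(Args&&...)
+    // The try/catch blocks guarantee the raw node allocation is returned to the
+    // allocator if value_type's constructor throws.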
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <class... Args>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(Args&&... args)
+    {
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward<Args>(args)...);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const node_type* pNodeSource, rbtree_node_base* pNodeParent)
+    {
+        node_type* const pNode = DoCreateNode(pNodeSource->mValue);
+
+        pNode->mpNodeRight  = NULL;
+        pNode->mpNodeLeft   = NULL;
+        pNode->mpNodeParent = pNodeParent;
+        pNode->mColor       = pNodeSource->mColor;
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    rbtree_node_base*
+    rbtree<K, V, C, A, E, bM, bU>::DoCopySubtree(const node_type* pNodeSource, rbtree_node_base* pNodeDest)
+    {
+        node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                // Copy the right side of the tree recursively.
+                if(pNodeSource->mpNodeRight)
+                    pNewNodeRoot->mpNodeRight = DoCopySubtree(static_cast<const node_type*>(pNodeSource->mpNodeRight), pNewNodeRoot);
+
+                rbtree_node_base* pNewNodeLeft;
+
+                for(pNodeSource = static_cast<const node_type*>(pNodeSource->mpNodeLeft), pNodeDest = pNewNodeRoot;
+                    pNodeSource;
+                    pNodeSource = static_cast<const node_type*>(pNodeSource->mpNodeLeft), pNodeDest = pNewNodeLeft)
+                {
+                    pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);
+
+                    pNodeDest->mpNodeLeft = pNewNodeLeft;
+
+                    // Copy the right side of the tree recursively.
+                    if(pNodeSource->mpNodeRight)
+                        pNewNodeLeft->mpNodeRight = DoCopySubtree(static_cast<const node_type*>(pNodeSource->mpNodeRight), pNewNodeLeft);
+                }
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoNukeSubtree(pNewNodeRoot);
+                throw;
+            }
+        #endif
+
+        return pNewNodeRoot;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    void rbtree<K, V, C, A, E, bM, bU>::DoNukeSubtree(rbtree_node_base* pNode)
+    {
+        while(pNode) // Recursively traverse the tree and destroy items as we go.
+        {
+            DoNukeSubtree(pNode->mpNodeRight);
+
+            node_type* const pNodeLeft = static_cast<node_type*>(pNode->mpNodeLeft);
+            DoFreeNode(static_cast<node_type*>(pNode));
+            pNode = pNodeLeft;
+        }
+    }
+
+
+
+    ///////////////////////////////////////////////////////////////////////
+    // global operators
+    ///////////////////////////////////////////////////////////////////////
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline bool operator==(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+    }
+
+
+    // Note that in operator< we do comparisons based on the tree value_type with operator<() of the
+    // value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
+    // for map/multimap the value_type is a pair of (key, mapped) values. operator< for pair can be seen
+    // by looking at utility.h, but it basically uses operator< on pair.first and then on pair.second.
+    // The C++ standard appears to require this behaviour, whether intentionally or not. If anything, a
+    // good reason to do this is for consistency. A map and a vector that contain the same items should
+    // compare the same.
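+    //
+    // For example (a sketch):
+    //     eastl::map<int, int> a = {{1, 1}};
+    //     eastl::map<int, int> b = {{1, 2}};
+    //     // (a < b) is true: the keys tie, so pair.second decides.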
+ template + inline bool operator<(const rbtree& a, const rbtree& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator!=(const rbtree& a, const rbtree& b) + { + return !(a == b); + } + + + template + inline bool operator>(const rbtree& a, const rbtree& b) + { + return b < a; + } + + + template + inline bool operator<=(const rbtree& a, const rbtree& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const rbtree& a, const rbtree& b) + { + return !(a < b); + } + + + template + inline void swap(rbtree& a, rbtree& b) + { + a.swap(b); + } + + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/smart_ptr.h b/external/EASTL/include/EASTL/internal/smart_ptr.h new file mode 100644 index 00000000..8a37950f --- /dev/null +++ b/external/EASTL/include/EASTL/internal/smart_ptr.h @@ -0,0 +1,267 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_SMART_PTR_H +#define EASTL_INTERNAL_SMART_PTR_H + + +#include +#include +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + namespace Internal + { + // Tells if the Deleter type has a typedef for pointer to T. If so then return it, + // else return T*. The large majority of the time the pointer type will be T*. + // The C++11 Standard requires that scoped_ptr let the deleter define the pointer type. + // + // Example usage: + // typedef typename unique_pointer_type::type pointer + // + template + class unique_pointer_type + { + template + static typename U::pointer test(typename U::pointer*); + + template + static T* test(...); + + public: + typedef decltype(test::type>(0)) type; + }; + + + /////////////////////////////////////////////////////////////////////// + // is_array_cv_convertible + // + // Tells if the array pointer P1 is cv-convertible to array pointer P2. + // The two types have two be equivalent pointer types and be convertible + // when you consider const/volatile properties of them. + // + // Example usage: + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => true + // is_array_cv_convertible::value => true + // is_array_cv_convertible::value => true + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_array_cv_convertible_CONFORMANCE 1 + + template ::element_type>, + eastl::remove_cv_t::element_type>>> + struct is_array_cv_convertible_impl + : public eastl::is_convertible {}; // Return true if P1 is convertible to P2. + + template + struct is_array_cv_convertible_impl + : public eastl::false_type {}; // P1's underlying type is not the same as P2's, so it can't be converted, even if P2 refers to a subclass of P1. Parent == Child, but Parent[] != Child[] + + template && !eastl::is_pointer_v> + struct is_array_cv_convertible + : public is_array_cv_convertible_impl {}; + + template + struct is_array_cv_convertible + : public eastl::false_type {}; // P1 is scalar not a pointer, so it can't be converted to a pointer. 
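+
+        // For example (a sketch; Base and Derived are hypothetical class types):
+        //     is_array_cv_convertible<int*, const int*>::value => true  (adding cv is safe)
+        //     is_array_cv_convertible<Derived*, Base*>::value  => false (a Derived[] is not a Base[])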
+ + + /////////////////////////////////////////////////////////////////////// + // is_derived + // + // Given two (possibly identical) types Base and Derived, is_base_of::value == true + // if and only if Base is a direct or indirect base class of Derived. This is like is_base_of + // but returns false if Derived is the same as Base. So is_derived is true only if Derived is actually a subclass + // of Base and not Base itself. + // + // is_derived may only be applied to complete types. + // + // Example usage: + // is_derived::value => false + // is_derived::value => false + // is_derived::value => true + // is_derived::value => false + /////////////////////////////////////////////////////////////////////// + + #if EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE + #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 1 + + template + struct is_derived : public eastl::integral_constant::value && !eastl::is_same::type, typename eastl::remove_cv::type>::value> {}; + #else + #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 0 + + template // This returns true if Derived is unrelated to Base. That's a wrong answer, but is better for us than returning false for compilers that don't support is_base_of. + struct is_derived : public eastl::integral_constant::type, typename eastl::remove_cv::type>::value> {}; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_safe_array_conversion + // + // Say you have two array types: T* t and U* u. You want to assign the u to t but only if + // that's a safe thing to do. As shown in the logic below, the array conversion + // is safe if U* and T* are convertible, if U is an array, and if either U or T is not + // a pointer or U is not derived from T. + // + // Note: Usage of this class could be replaced with is_array_cv_convertible usage. + // To do: Do this replacement and test it. + // + /////////////////////////////////////////////////////////////////////// + + template + struct is_safe_array_conversion : public eastl::integral_constant::value && + eastl::is_array::value && + (!eastl::is_pointer::value || !is_pointer::value || !Internal::is_derived::type>::value)> {}; + + } // namespace Internal + + + + + + + + /// default_delete + /// + /// C++11 smart pointer default delete function class. + /// + /// Provides a default way to delete an object. This default is simply to call delete on the + /// object pointer. You can provide an alternative to this class or you can override this on + /// a class-by-class basis like the following: + /// template <> + /// struct smart_ptr_deleter + /// { + /// void operator()(MyClass* p) const + /// { SomeCustomFunction(p); } + /// }; + /// + template + struct default_delete + { + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here. + EA_CONSTEXPR default_delete() = default; + #else + EA_CONSTEXPR default_delete() EA_NOEXCEPT = default; + #endif + + template // Enable if T* can be constructed with U* (i.e. U* is convertible to T*). + default_delete(const default_delete&, typename eastl::enable_if::value>::type* = 0) EA_NOEXCEPT {} + + void operator()(T* p) const EA_NOEXCEPT + { + static_assert(eastl::internal::is_complete_type_v, "Attempting to call the destructor of an incomplete type"); + delete p; + } + }; + + + template + struct default_delete // Specialization for arrays. + { + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here. 
+ EA_CONSTEXPR default_delete() = default; + #else + EA_CONSTEXPR default_delete() EA_NOEXCEPT = default; + #endif + + template // This ctor is enabled if T is equal to or a base of U, and if U is less or equal const/volatile-qualified than T. + default_delete(const default_delete&, typename eastl::enable_if::value>::type* = 0) EA_NOEXCEPT {} + + void operator()(T* p) const EA_NOEXCEPT + { delete[] p; } + }; + + + + + /// smart_ptr_deleter + /// + /// Deprecated in favor of the C++11 name: default_delete + /// + template + struct smart_ptr_deleter + { + typedef T value_type; + + void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept. + { delete const_cast(p); } + }; + + template <> + struct smart_ptr_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + template <> + struct smart_ptr_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + + + /// smart_array_deleter + /// + /// Deprecated in favor of the C++11 name: default_delete + /// + template + struct smart_array_deleter + { + typedef T value_type; + + void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept. + { delete[] const_cast(p); } + }; + + template <> + struct smart_array_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + diff --git a/external/EASTL/include/EASTL/internal/thread_support.h b/external/EASTL/include/EASTL/internal/thread_support.h new file mode 100644 index 00000000..49856c09 --- /dev/null +++ b/external/EASTL/include/EASTL/internal/thread_support.h @@ -0,0 +1,160 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_THREAD_SUPPORT_H +#define EASTL_INTERNAL_THREAD_SUPPORT_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif +#include + +///////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE(rparolin): We need a fallback mutex implementation because the Microsoft implementation +// of std::mutex can not be included in managed-cpp code. +// +// fatal error C1189: is not supported when compiling with /clr or /clr:pure +///////////////////////////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_CPP11_MUTEX_ENABLED) + #if defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_COMPILER_MANAGED_CPP) + #define EASTL_CPP11_MUTEX_ENABLED 1 + #else + #define EASTL_CPP11_MUTEX_ENABLED 0 + #endif +#endif + +#if EASTL_CPP11_MUTEX_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#if defined(EA_PLATFORM_MICROSOFT) + // Cannot include Windows headers in our headers, as they kill builds with their #defines. +#elif defined(EA_PLATFORM_POSIX) + #include +#endif + +// copy constructor could not be generated because a base class copy constructor is inaccessible or deleted. 
+// assignment operator could not be generated because a base class assignment operator is inaccessible or deleted. +// non dll-interface class used as base for DLL-interface classkey 'identifier'. +EA_DISABLE_VC_WARNING(4625 4626 4275); + + +#if defined(EA_PLATFORM_MICROSOFT) + #if defined(EA_PROCESSOR_POWERPC) + extern "C" long __stdcall _InterlockedIncrement(long volatile* Addend); + #pragma intrinsic (_InterlockedIncrement) + + extern "C" long __stdcall _InterlockedDecrement(long volatile* Addend); + #pragma intrinsic (_InterlockedDecrement) + + extern "C" long __stdcall _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp); + #pragma intrinsic (_InterlockedCompareExchange) + #else + extern "C" long _InterlockedIncrement(long volatile* Addend); + #pragma intrinsic (_InterlockedIncrement) + + extern "C" long _InterlockedDecrement(long volatile* Addend); + #pragma intrinsic (_InterlockedDecrement) + + extern "C" long _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp); + #pragma intrinsic (_InterlockedCompareExchange) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_THREAD_SUPPORT_AVAILABLE +// +// Defined as 0 or 1, based on existing support. +// Identifies if thread support (e.g. atomics, mutexes) is available for use. +// The large majority of EASTL doesn't use thread support, but a few parts +// of it (e.g. shared_ptr) do. +/////////////////////////////////////////////////////////////////////////////// + +#if !defined(EASTL_THREAD_SUPPORT_AVAILABLE) + #if defined(__clang__) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)) + #define EASTL_THREAD_SUPPORT_AVAILABLE 1 + #elif defined(EA_COMPILER_MSVC) + #define EASTL_THREAD_SUPPORT_AVAILABLE 1 + #else + #define EASTL_THREAD_SUPPORT_AVAILABLE 0 + #endif +#endif + + +namespace eastl +{ + namespace Internal + { + // mutex + #if EASTL_CPP11_MUTEX_ENABLED + using std::mutex; + #else + class EASTL_API mutex + { + public: + mutex(); + ~mutex(); + + void lock(); + void unlock(); + + protected: + #if defined(EA_PLATFORM_MICROSOFT) + #if defined(_WIN64) + uint64_t mMutexBuffer[40 / sizeof(uint64_t)]; // CRITICAL_SECTION is 40 bytes on Win64. + #elif defined(_WIN32) + uint32_t mMutexBuffer[24 / sizeof(uint32_t)]; // CRITICAL_SECTION is 24 bytes on Win32. 
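+                // These buffers are size stand-ins so that this header need not pull
+                // in Windows headers (see the note near the top of this file); the
+                // implementation presumably placement-constructs the real
+                // CRITICAL_SECTION inside them, along the lines of:
+                //     InitializeCriticalSection((CRITICAL_SECTION*)mMutexBuffer);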
+ #endif + #elif defined(EA_PLATFORM_POSIX) + pthread_mutex_t mMutex; + #endif + }; + #endif + + + // auto_mutex + class EASTL_API auto_mutex + { + public: + EA_FORCE_INLINE auto_mutex(mutex& mutex) : pMutex(&mutex) + { pMutex->lock(); } + + EA_FORCE_INLINE ~auto_mutex() + { pMutex->unlock(); } + + protected: + mutex* pMutex; + + auto_mutex(const auto_mutex&) = delete; + void operator=(const auto_mutex&) = delete; + }; + + + // shared_ptr_auto_mutex + class EASTL_API shared_ptr_auto_mutex : public auto_mutex + { + public: + shared_ptr_auto_mutex(const void* pSharedPtr); + + shared_ptr_auto_mutex(const shared_ptr_auto_mutex&) = delete; + void operator=(shared_ptr_auto_mutex&&) = delete; + }; + + + } // namespace Internal + +} // namespace eastl + + +EA_RESTORE_VC_WARNING(); + + +#endif // Header include guard diff --git a/external/EASTL/include/EASTL/internal/tuple_fwd_decls.h b/external/EASTL/include/EASTL/internal/tuple_fwd_decls.h new file mode 100644 index 00000000..d88163ab --- /dev/null +++ b/external/EASTL/include/EASTL/internal/tuple_fwd_decls.h @@ -0,0 +1,70 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_TUPLE_FWD_DECLS_H +#define EASTL_TUPLE_FWD_DECLS_H + +#include + +#if EASTL_TUPLE_ENABLED + +namespace eastl +{ + template + class tuple; + + template + struct tuple_size; + +#if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR size_t tuple_size_v = tuple_size::value; +#endif + + template + struct tuple_element; + + template + using tuple_element_t = typename tuple_element::type; + + template struct is_lvalue_reference; + + template + struct conditional; + + template struct add_lvalue_reference; + + template struct remove_reference; + + // const typename for tuple_element_t, for when tuple or TupleImpl cannot itself be const + template + using const_tuple_element_t = typename conditional< + is_lvalue_reference>::value, + typename add_lvalue_reference>::type>::type, + const tuple_element_t + >::type; + + // get + template + tuple_element_t>& get(tuple& t); + + template + const_tuple_element_t>& get(const tuple& t); + + template + tuple_element_t>&& get(tuple&& t); + + template + T& get(tuple& t); + + template + const T& get(const tuple& t); + + template + T&& get(tuple&& t); +} + +#endif // EASTL_VARIADIC_TEMPLATES_ENABLED + +#endif // EASTL_TUPLE_FWD_DECLS_H diff --git a/external/EASTL/include/EASTL/internal/type_compound.h b/external/EASTL/include/EASTL/internal/type_compound.h new file mode 100644 index 00000000..9871f43c --- /dev/null +++ b/external/EASTL/include/EASTL/internal/type_compound.h @@ -0,0 +1,717 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_COMPOUND_H +#define EASTL_INTERNAL_TYPE_COMPOUND_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +// Until we revise the code below to handle EDG warnings, we don't have much choice but to disable them. 
+#if defined(__EDG_VERSION__) + #pragma diag_suppress=1931 // operand of sizeof is not a type, variable, or dereferenced pointer expression +#endif + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + // extent + // + // extent::value is an integral type representing the number of + // elements in the Ith dimension of array type T. + // + // For a given array type T[N], extent::value == N. + // For a given multi-dimensional array type T[M][N], extent::value == N. + // For a given multi-dimensional array type T[M][N], extent::value == M. + // For a given array type T and a given dimension I where I >= rank::value, extent::value == 0. + // For a given array type of unknown extent T[], extent::value == 0. + // For a given non-array type T and an arbitrary dimension I, extent::value == 0. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_extent_CONFORMANCE 1 // extent is conforming. + + template + struct extent_help : public eastl::integral_constant {}; + + template + struct extent_help : public eastl::integral_constant {}; + + template + struct extent_help : public eastl::extent_help { }; + + template + struct extent_help : public eastl::extent_help {}; + + template // extent uses unsigned instead of size_t. + struct extent : public eastl::extent_help { }; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR auto extent_v = extent::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_array + // + // is_array::value == true if and only if T is an array type, + // including unbounded array types. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_array_CONFORMANCE 1 // is_array is conforming; doesn't make mistakes. + + template + struct is_array : public eastl::false_type {}; + + template + struct is_array : public eastl::true_type {}; + + template + struct is_array : public eastl::true_type {}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_array_v = is_array::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_array_of_known_bounds + // + // Deprecated in C++20. Use is_bounded_array. + // + // is_array_of_known_bounds::value is true if T is an array and is + // of known bounds. is_array_of_unknown_bounds::value == true, + // while is_array_of_unknown_bounds::value = false. + // + /////////////////////////////////////////////////////////////////////// + + template + struct EASTL_REMOVE_AT_2024_APRIL is_array_of_known_bounds + : public eastl::integral_constant::value != 0> {}; + + + /////////////////////////////////////////////////////////////////////// + // is_array_of_unknown_bounds + // + // Deprecated in C++20. Use is_unbounded_array. + // + // is_array_of_unknown_bounds::value is true if T is an array but is + // of unknown bounds. is_array_of_unknown_bounds::value == false, + // while is_array_of_unknown_bounds::value = true. + // + /////////////////////////////////////////////////////////////////////// + + template + struct EASTL_REMOVE_AT_2024_APRIL is_array_of_unknown_bounds + : public eastl::integral_constant::value && (eastl::extent::value == 0)> {}; + + + /////////////////////////////////////////////////////////////////////// + // is_member_function_pointer + // + // is_member_function_pointer::value == true if and only if T is a + // pointer to member function type. 
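+    // Example usage (a sketch; Widget is illustrative):
+    //     struct Widget { void Tick(); int mCount; };
+    //     is_member_function_pointer<void (Widget::*)()>::value => true
+    //     is_member_function_pointer<int Widget::*>::value      => false (data-member pointer)
+    //     is_member_function_pointer<void (*)()>::value         => false (free-function pointer)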
+ // + /////////////////////////////////////////////////////////////////////// + // We detect member functions with 0 to N arguments. We can extend this + // for additional arguments if necessary. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_function_pointer_CONFORMANCE 1 // is_member_function_pointer is conforming; doesn't make mistakes. + + namespace internal + { + template + struct is_member_function_pointer_helper : false_type {}; + + template + struct is_member_function_pointer_helper : is_function {}; + } + + template + struct is_member_function_pointer + : internal::is_member_function_pointer_helper::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_function_pointer_v = is_member_function_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_member_pointer + // + // is_member_pointer::value == true if and only if: + // is_member_object_pointer::value == true, or + // is_member_function_pointer::value == true + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_pointer_CONFORMANCE 1 // is_member_pointer is conforming; doesn't make mistakes. + + namespace internal { + template + struct is_member_pointer_helper + : public eastl::false_type {}; + + template + struct is_member_pointer_helper + : public eastl::true_type {}; + } + + template + struct is_member_pointer + : public internal::is_member_pointer_helper::type>::type {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_pointer_v = is_member_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_member_object_pointer + // + // is_member_object_pointer::value == true if and only if T is a + // pointer to data member type. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_object_pointer_CONFORMANCE 1 // is_member_object_pointer is conforming; doesn't make mistakes. + + template + struct is_member_object_pointer : public eastl::integral_constant::value && + !eastl::is_member_function_pointer::value + > {}; + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_object_pointer_v = is_member_object_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_pointer + // + // is_pointer::value == true if and only if T is a pointer type. + // This category includes function pointer types, but not pointer to + // member types. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_pointer_CONFORMANCE 1 // is_pointer is conforming; doesn't make mistakes. 
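+
+    // For example (a sketch):
+    //     is_pointer<int*>::value          => true
+    //     is_pointer<int* const>::value    => true  (cv on the pointer itself is removed first)
+    //     is_pointer<int&>::value          => false
+    //     is_pointer<int Widget::*>::value => false (member pointers are excluded, per above)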
+ + template struct is_pointer_helper : public false_type{}; + + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + + template + struct is_pointer_value : public bool_constant::value && !is_member_pointer::value> {}; + + template + struct is_pointer : public integral_constant::value>{}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_pointer_v = is_pointer::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_convertible + // + // Given two (possible identical) types From and To, is_convertible::value == true + // if and only if an lvalue of type From can be implicitly converted to type To, + // or is_void::value == true + // + // An instance of the type predicate holds true if the expression To to = from;, where from is an object of type From, is well-formed. + // + // is_convertible may only be applied to complete types. + // Type To may not be an abstract type. + // If the conversion is ambiguous, the program is ill-formed. + // If either or both of From and To are class types, and the conversion would invoke + // non-public member functions of either From or To (such as a private constructor of To, + // or a private conversion operator of From), the program is ill-formed. + // + // Note that without compiler help, both is_convertible and is_base + // can produce compiler errors if the conversion is ambiguous. + // Example: + // struct A {}; + // struct B : A {}; + // struct C : A {}; + // struct D : B, C {}; + // is_convertible::value; // Generates compiler error. + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_convertible_to))) + #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 // is_convertible is conforming. + + // Problem: VC++ reports that int is convertible to short, yet if you construct a short from an int then VC++ generates a warning: + // warning C4242: 'initializing' : conversion from 'int' to 'short', possible loss of data. We can deal with this by making + // is_convertible be false for conversions that could result in loss of data. Or we could make another trait called is_lossless_convertible + // and use that appropriately in our code. Or we could put the onus on the user to work around such warnings. + template + struct is_convertible : public integral_constant{}; + + #else + #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 + + template::value || eastl::is_function::value || eastl::is_array::value > + struct is_convertible_helper // Anything is convertible to void. Nothing is convertible to a function or an array. + { static const bool value = eastl::is_void::value; }; + + template + class is_convertible_helper + { + template + static void ToFunction(To1); // We try to call this function with an instance of From. It is valid if From can be converted to To. 
+ + template + static eastl::no_type is(...); + + template + static decltype(ToFunction(eastl::declval()), eastl::yes_type()) is(int); + + public: + static const bool value = sizeof(is(0)) == 1; + }; + + template + struct is_convertible + : public integral_constant::value> {}; + + #endif + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_convertible_v = is_convertible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_convertible + // + // https://en.cppreference.com/w/cpp/types/is_convertible + // + // template + // struct is_explicitly_convertible + // : public is_constructible {}; + /////////////////////////////////////////////////////////////////////// + // TODO(rparolin): implement type-trait + + + + /////////////////////////////////////////////////////////////////////// + // is_explicitly_convertible + // + // This sometime-seen extension trait is the same as is_constructible + // and so we don't define it. + // + // template + // struct is_explicitly_convertible + // : public is_constructible {}; + /////////////////////////////////////////////////////////////////////// + + + + /////////////////////////////////////////////////////////////////////// + // is_union + // + // is_union::value == true if and only if T is a union type. + // + // There is no way to tell if a type is a union without compiler help. + // As of this writing, only Metrowerks v8+ supports such functionality + // via 'msl::is_union::value'. The user can force something to be + // evaluated as a union via EASTL_DECLARE_UNION. + /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_union))) + #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 1 // is_union is conforming. + + template + struct is_union : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 0 // is_union is not fully conforming. + + template struct is_union : public false_type{}; + #endif + + #define EASTL_DECLARE_UNION(T) namespace eastl{ template <> struct EASTL_REMOVE_AT_2024_APRIL is_union : public true_type{}; template <> struct is_union : public true_type{}; } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_union_v = is_union::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_class + // + // is_class::value == true if and only if T is a class or struct + // type (and not a union type). + // + // Without specific compiler help, it is not possible to + // distinguish between unions and classes. As a result, is_class + // will erroneously evaluate to true for union types. + /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_class))) + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 1 // is_class is conforming. 
+ + template + struct is_class : public integral_constant{}; + #elif defined(__EDG__) + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE + + typedef char yes_array_type[1]; + typedef char no_array_type[2]; + template static yes_array_type& is_class_helper(void (U::*)()); + template static no_array_type& is_class_helper(...); + + template + struct is_class : public integral_constant(0)) == sizeof(yes_array_type) && !is_union::value + >{}; + #elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+ + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE + + template static yes_type is_class_helper(void (U::*)()); + template static no_type is_class_helper(...); + + template + struct is_class : public integral_constant(0)) == sizeof(yes_type) && !is_union::value + >{}; + #else + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 0 // is_class is not fully conforming. + + // GCC 2.x version, due to GCC being broken. + template + struct is_class : public false_type{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_class_v = is_class::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_polymorphic + // + // is_polymorphic::value == true if and only if T is a class or struct + // that declares or inherits a virtual function. is_polymorphic may only + // be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(__clang__) && EA_COMPILER_HAS_FEATURE(is_polymorphic))) + #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming. + + template + struct is_polymorphic : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming. 
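+
+        // For example (a sketch):
+        //     struct Plain   { int x; };
+        //     struct Virtual { virtual ~Virtual(); };
+        //     is_polymorphic<Plain>::value   => false
+        //     is_polymorphic<Virtual>::value => true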
+ + template + struct is_polymorphic_imp1 + { + typedef typename remove_cv::type t; + + struct helper_1 : public t + { + helper_1(); + ~helper_1() throw(); + char pad[64]; + }; + + struct helper_2 : public t + { + helper_2(); + virtual ~helper_2() throw(); + #ifndef _MSC_VER + virtual void foo(); + #endif + char pad[64]; + }; + + static const bool value = (sizeof(helper_1) == sizeof(helper_2)); + }; + + template + struct is_polymorphic_imp2{ static const bool value = false; }; + + template + struct is_polymorphic_selector{ template struct rebind{ typedef is_polymorphic_imp2 type; }; }; + + template <> + struct is_polymorphic_selector{ template struct rebind{ typedef is_polymorphic_imp1 type; }; }; + + template + struct is_polymorphic_value{ + typedef is_polymorphic_selector::value> selector; + typedef typename selector::template rebind binder; + typedef typename binder::type imp_type; + static const bool value = imp_type::value; + }; + + template + struct is_polymorphic : public integral_constant::value>{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_polymorphic_v = is_polymorphic::value; + #endif + + + + + /////////////////////////////////////////////////////////////////////// + // is_object + // + // is_object::value == true if and only if: + // is_reference::value == false, and + // is_function::value == false, and + // is_void::value == false + // + // The C++ standard, section 3.9p9, states: "An object type is a + // (possibly cv-qualified) type that is not a function type, not a + // reference type, and not incomplete (except for an incompletely + // defined object type). + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_object_CONFORMANCE (EASTL_TYPE_TRAIT_is_reference_CONFORMANCE && EASTL_TYPE_TRAIT_is_void_CONFORMANCE && EASTL_TYPE_TRAIT_is_function_CONFORMANCE) + + template + struct is_object : public integral_constant::value && !is_void::value && !is_function::value + >{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_object_v = is_object::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_scalar + // + // is_scalar::value == true if and only if: + // is_arithmetic::value == true, or + // is_enum::value == true, or + // is_pointer::value == true, or + // is_member_pointer::value == true, or + // is_null_pointer::value == true + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_scalar_CONFORMANCE 1 // is_scalar is conforming. + + template + struct is_scalar : public integral_constant::value || is_enum::value || is_pointer::value || + is_member_pointer::value || + is_null_pointer::value> {}; + + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_scalar_v = is_scalar::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_compound + // + // Compound means anything but fundamental. See C++ standard, section 3.9.2. 
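+    // For example (a sketch): int is fundamental, hence not compound, while int*,
+    // references, enums, arrays, and class types are all compound.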
+ // + // is_compound::value == true if and only if: + // is_fundamental::value == false + // + // Thus, is_compound::value == true if and only if: + // is_floating_point::value == false, and + // is_integral::value == false, and + // is_void::value == false + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_compound_CONFORMANCE EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE + + template + struct is_compound : public integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_compound_v = is_compound::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // decay + // + // Converts the type T to its decayed equivalent. That means doing + // lvalue to rvalue, array to pointer, function to pointer conversions, + // and removal of const and volatile. + // This is the type conversion silently applied by the compiler to + // all function arguments when passed by value. + + #define EASTL_TYPE_TRAIT_decay_CONFORMANCE 1 // decay is conforming. + + template + struct decay + { + typedef typename eastl::remove_reference::type U; + + typedef typename eastl::conditional< + eastl::is_array::value, + typename eastl::remove_extent::type*, + typename eastl::conditional< + eastl::is_function::value, + typename eastl::add_pointer::type, + typename eastl::remove_cv::type + >::type + >::type type; + }; + + + // decay_t is the C++14 using typedef for typename decay::type, though + // it requires only C++11 compiler functionality to implement. + // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers. + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_DECAY_T(T) typename decay::type + #else + template + using decay_t = typename decay::type; + #define EASTL_DECAY_T(T) decay_t + #endif + + + /////////////////////////////////////////////////////////////////////// + // common_type + // + // Determines the common type among all types T..., that is the type all T... + // can be implicitly converted to. + // + // It is intended that this be specialized by the user for cases where it + // is useful to do so. Example specialization: + // template + // struct common_type{ typedef MyBaseClassB type; }; + // + // The member typedef type shall be defined as set out in 20.9.7.6,p3. All types in + // the parameter pack T shall be complete or (possibly cv) void. A program may + // specialize this trait if at least one template parameter in the specialization + // is a user-defined type. Note: Such specializations are needed when only + // explicit conversions are desired among the template arguments. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_common_type_CONFORMANCE 1 // common_type is conforming. + + template + struct common_type; + + template + struct common_type + { typedef decay_t type; }; // Question: Should we use T or decay_t here? The C++11 Standard specifically (20.9.7.6,p3) specifies that it be without decay, but libc++ uses decay. + + template + struct common_type + { + typedef decay_t() : declval())> type; // The type of a tertiary expression is set by the compiler to be the common type of the two result types. + }; + + template + struct common_type + { typedef typename common_type::type, V...>::type type; }; + + + // common_type_t is the C++14 using typedef for typename common_type::type. 
+
+
+	// common_type_t is the C++14 using typedef for typename common_type<T...>::type.
+	// We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+	#if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+		#define EASTL_COMMON_TYPE_T(...) typename common_type<__VA_ARGS__>::type
+	#else
+		template <typename... T>
+		using common_type_t = typename common_type<T...>::type;
+		#define EASTL_COMMON_TYPE_T(...) common_type_t<__VA_ARGS__>
+	#endif
+
+	///////////////////////////////////////////////////////////////////////
+	// is_final
+	///////////////////////////////////////////////////////////////////////
+	#if EASTL_IS_FINAL_AVAILABLE == 1
+		template <typename T>
+		struct is_final : public integral_constant<bool, __is_final(T)> {};
+	#else
+		// no compiler support, so we always return false
+		template <typename T>
+		struct is_final : public false_type {};
+	#endif
+
+	#if EASTL_VARIABLE_TEMPLATES_ENABLED
+		template <typename T>
+		EA_CONSTEXPR bool is_final_v = is_final<T>::value;
+	#endif
+
+
+	///////////////////////////////////////////////////////////////////////
+	// is_aggregate
+	//
+	// https://en.cppreference.com/w/cpp/language/aggregate_initialization
+	//
+	// An aggregate is one of the following types:
+	// * array type
+	// * class type (typically, struct or union), that has
+	//     * no private or protected non-static data members
+	//     * no user-provided, inherited, or explicit constructors
+	//       (explicitly defaulted or deleted constructors are allowed)
+	//     * no virtual, private, or protected (since C++17) base classes
+	//     * no virtual member functions
+	//     * no default member initializers
+	//
+	// A usage sketch follows the definitions below.
+	///////////////////////////////////////////////////////////////////////
+	#if EASTL_IS_AGGREGATE_AVAILABLE == 1
+		#define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 1
+
+		template <typename T>
+		struct is_aggregate : public integral_constant<bool, __is_aggregate(T)> {};
+	#else
+		#define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 0
+
+		// no compiler support, so we always return false
+		template <typename T>
+		struct is_aggregate : public false_type {};
+	#endif
+
+	#if EASTL_VARIABLE_TEMPLATES_ENABLED
+		template <typename T>
+		EA_CONSTEXPR bool is_aggregate_v = is_aggregate<T>::value;
+	#endif
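+
+	// Example usage (illustrative sketch; 'Sealed' and 'Point' are hypothetical
+	// types, and the asserts hold only when the corresponding compiler support
+	// macro above is 1):
+	//     struct Sealed final { };
+	//     struct Point { int x; int y; };   // public members, no user-provided constructors
+	//
+	//     static_assert(eastl::is_final<Sealed>::value,    "detected only when EASTL_IS_FINAL_AVAILABLE == 1");
+	//     static_assert(eastl::is_aggregate<Point>::value, "detected only when EASTL_IS_AGGREGATE_AVAILABLE == 1");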
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
diff --git a/external/EASTL/include/EASTL/internal/type_detected.h b/external/EASTL/include/EASTL/internal/type_detected.h
new file mode 100644
index 00000000..e368a6fa
--- /dev/null
+++ b/external/EASTL/include/EASTL/internal/type_detected.h
@@ -0,0 +1,180 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_DETECTED_H
+#define EASTL_INTERNAL_TYPE_DETECTED_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+	///////////////////////////////////////////////////////////////////////
+	// nonesuch
+	//
+	// Type given as a result from detected_t if the supplied arguments do not respect the constraint.
+	//
+	// https://en.cppreference.com/w/cpp/experimental/nonesuch
+	//
+	///////////////////////////////////////////////////////////////////////
+	struct nonesuch
+	{
+		~nonesuch() = delete;
+		nonesuch(nonesuch const&) = delete;
+		void operator=(nonesuch const&) = delete;
+	};
+
+	namespace internal
+	{
+		template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
+		struct detector
+		{
+			using type = Default;
+			using value_t = false_type;
+		};
+
+		template <class Default, template <class...> class Op, class... Args>
+		struct detector<Default, void_t<Op<Args...>>, Op, Args...>
+		{
+			using type = Op<Args...>;
+			using value_t = true_type;
+		};
+	} // namespace internal
+
+	///////////////////////////////////////////////////////////////////////
+	// is_detected
+	//
+	// Checks if some supplied arguments (Args) respect a constraint (Op).
+	// is_detected expands to true_type if the arguments respect the constraint, and to false_type otherwise.
+	// This helper is convenient to use for compile time introspection.
+	//
+	// https://en.cppreference.com/w/cpp/experimental/is_detected
+	//
+	// Example:
+	//     template <class T, class U>
+	//     using detect_can_use_addition_operator = decltype(declval<T>() + declval<U>());
+	//
+	//     template <class T, class U>
+	//     void sum(const T& t, const U& u)
+	//     {
+	//         static_assert(is_detected<detect_can_use_addition_operator, T, U>::value, "Supplied types cannot be summed together.");
+	//         // or...
+	//         static_assert(is_detected_v<detect_can_use_addition_operator, T, U>, "Supplied types cannot be summed together.");
+	//         return t + u;
+	//     }
+	//
+	///////////////////////////////////////////////////////////////////////
+	template