diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..6767e56 --- /dev/null +++ b/.clang-format @@ -0,0 +1,142 @@ +--- +AlignAfterOpenBracket: Align +AlignArrayOfStructures: Right +AlignConsecutiveAssignments: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: true + PadOperators: true +AlignConsecutiveBitFields: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: true + PadOperators: true +AlignConsecutiveDeclarations: + Enabled: false +AlignConsecutiveMacros: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false +AlignEscapedNewlines: Right +AlignOperands: true +AlignTrailingComments: + Kind: Always + OverEmptyLines: 0 +AllowAllArgumentsOnNextLine: False +AllowAllParametersOfDeclarationOnNextLine: False +# clang-18 +#AllowBreakBeforeNoexceptSpecifier: Never +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: False +AllowShortEnumsOnASingleLine: False +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: False +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: False +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: False +BinPackParameters: False +BitFieldColonSpacing: Both +BreakBeforeBraces: Custom +BraceWrapping: + AfterCaseLabel: True + AfterClass: True + AfterControlStatement: Always + AfterEnum: True + AfterFunction: True + AfterNamespace: False + AfterObjCDeclaration: True + AfterStruct: True + AfterUnion: True + AfterExternBlock: True + BeforeCatch: True + BeforeElse: True + BeforeLambdaBody: True + BeforeWhile: True + IndentBraces: False + SplitEmptyFunction: False + SplitEmptyRecord: False + SplitEmptyNamespace: False +# clang-17 +#BracedInitializerIndentWidth: 2 +BreakAfterAttributes: Always +BreakArrays: False +BreakBeforeBinaryOperators: NonAssignment +BreakBeforeConceptDeclarations: Always 
+BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeTernaryOperators: True +BreakConstructorInitializers: AfterColon +BreakInheritanceList: AfterComma +BreakStringLiterals: False +ColumnLimit: 120 +CompactNamespaces: True +ContinuationIndentWidth: 2 +Cpp11BracedListStyle: True +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: Always +FixNamespaceComments: true +IndentAccessModifiers: False +IndentCaseBlocks: True +IndentCaseLabels: True +IndentExternBlock: Indent +IndentGotoLabels: False +IndentPPDirectives: AfterHash +IndentWidth: 2 +IndentWrappedFunctionNames: False +InsertBraces: False +InsertNewlineAtEOF: True +IntegerLiteralSeparator: + Binary: 0 + Decimal: 3 + Hex: 0 +KeepEmptyLinesAtTheStartOfBlocks: False +LambdaBodyIndentation: Signature +Language: Cpp +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +PackConstructorInitializers: Never +PointerAlignment: Left +QualifierAlignment: Left +ReferenceAlignment: Left +ReflowComments: True +RemoveBracesLLVM: False +# clang-17 +#RemoveParentheses: False +RemoveSemicolon: False +RequiresClausePosition: OwnLine +RequiresExpressionIndentation: OuterScope +SeparateDefinitionBlocks: Always +ShortNamespaceLines: 0 +SortIncludes: CaseInsensitive +SortUsingDeclarations: LexicographicNumeric +SpaceAfterCStyleCast: False +SpaceAfterLogicalNot: False +SpaceAfterTemplateKeyword: False +SpaceAroundPointerQualifiers: Default +SpaceBeforeAssignmentOperators: True +SpaceBeforeCaseColon: False +SpaceBeforeCpp11BracedList: False +SpaceBeforeCtorInitializerColon: False +SpaceBeforeInheritanceColon: False +SpaceBeforeParens: Never +SpaceBeforeRangeBasedForLoopColon: False +SpaceBeforeSquareBrackets: False +SpaceInEmptyBlock: False +SpaceInEmptyParentheses: False +SpacesBeforeTrailingComments: 1 +SpacesInAngles: Always +SpacesInCStyleCastParentheses: False +SpacesInConditionalStatement: True +SpacesInContainerLiterals: True +SpacesInLineCommentPrefix: + Minimum: 1 + Maximum: -1 +SpacesInParentheses: True 
+SpacesInSquareBrackets: True +Standard: c++20 +TabWidth: 2 +UseTab: Never diff --git a/.github/workflows/add_issue_to_project.yml b/.github/workflows/add_issue_to_project.yml deleted file mode 100644 index e8f4b83..0000000 --- a/.github/workflows/add_issue_to_project.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Assign issue to project - -on: - issues: - types: [opened] - -jobs: - assign_to_project: - runs-on: ubuntu-latest - name: Assign issue to project - steps: - - - name: Assign issue with `bug` or `enhancement` label to project - uses: actions/add-to-project@v0.4.0 - with: - project-url: https://github.com/orgs/koinos/projects/6 - github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }} - labeled: bug, enhancement - label-operator: OR diff --git a/.github/workflows/add_pr_to_project.yml b/.github/workflows/add_pr_to_project.yml deleted file mode 100644 index 31c6c97..0000000 --- a/.github/workflows/add_pr_to_project.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Assign pull request to project - -on: - pull_request: - types: [opened] - -jobs: - assign_to_project: - runs-on: ubuntu-latest - name: Assign pull request to project - steps: - - - name: Assign pull request to project - uses: actions/add-to-project@v0.4.0 - with: - project-url: https://github.com/orgs/koinos/projects/6 - github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }} diff --git a/.github/workflows/add_to_project.yml b/.github/workflows/add_to_project.yml deleted file mode 100644 index e9d82be..0000000 --- a/.github/workflows/add_to_project.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Auto assign to project - -on: - issues: - types: [opened] - pull_request: - types: [opened] - -jobs: - assign_to_project: - uses: koinos/koinos-github-workflows/.github/workflows/add_to_project.yml@master - secrets: - github-token: ${{ secrets.ISSUE_MANAGEMENT_TOKEN }} diff --git a/.github/workflows/close_issues.yml b/.github/workflows/close_issues.yml deleted file mode 100644 index 78ff5a3..0000000 --- 
a/.github/workflows/close_issues.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Close inactive issues - -on: - schedule: - - cron: "0 0 * * *" # Run every day at midnight - -jobs: - close-issues: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/stale@v7 - with: - exempt-issue-labels: "story,task,research" - days-before-stale: 30 - days-before-close: 14 - stale-issue-label: "stale" - stale-pr-label: "stale" - stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." - close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." - stale-pr-message: "This pull request is stale because it has been open for 30 days with no activity." - close-pr-message: "This pull request was closed because it has been inactive for 14 days since being marked as stale." - repo-token: ${{ secrets.ISSUE_MANAGEMENT_TOKEN }} diff --git a/.travis.yml b/.travis.yml index 4391c15..a7e91fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,47 +2,115 @@ language: c++ cache: ccache: true - pip: true addons: apt: - packages: - - python3 - - python3-pip - - python3-setuptools update: true + packages: + - clang + - clang-format + - llvm + - llvm-dev + - lcov + - ruby + - gcc-12 + - g++-12 + +env: + global: + - CMAKE_C_COMPILER_LAUNCHER=ccache + - CMAKE_CXX_COMPILER_LAUNCHER=ccache jobs: include: - - os: linux + - name: "Static Analysis" + os: linux dist: jammy env: - - RUN_TYPE=coverage - - MATRIX_EVAL="CC=clang-11 && CXX=clang++-11" - - os: linux + - CC=clang + - CXX=clang++ + before_script: + - cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON . + script: + - cmake --build . 
--config Debug --parallel 3 + + - name: "Sanitizer" + os: linux dist: jammy env: - - RUN_TYPE=test - - MATRIX_EVAL="CC=gcc && CXX=g++" - - os: linux + - CC=clang + - CXX=clang++ + before_script: + - mkdir build-address + - pushd build-address + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Address .. + - cmake --build . --config Debug --parallel 3 + - popd + - mkdir build-stack + - pushd build-stack + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Stack .. + - cmake --build . --config Debug --parallel 3 + - popd + - mkdir build-thread + - pushd build-thread + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Thread .. + - cmake --build . --config Debug --parallel 3 + - popd + script: + - pushd build-address/tests + - ctest -j1 --output-on-failure + - popd + - pushd build-stack/tests + - ctest -j1 --output-on-failure + - popd + - pushd build-thread/tests + - ctest -j1 --output-on-failure + + - name: "Coverage" + os: linux dist: jammy env: - - RUN_TYPE=test - - MATRIX_EVAL="CC=clang-11 && CXX=clang++-11" - -before_install: - - eval "${MATRIX_EVAL}" + - CC=clang + - CXX=clang++ + install: + - sudo gem install coveralls-lcov + before_script: + - mkdir build + - cd build + - cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. + script: + - cmake --build . --config Debug --parallel 3 --target coverage + after_success: + - coveralls-lcov --repo-token $COVERALLS_REPO_TOKEN --service-name travis-pro --service-job-id $TRAVIS_JOB_ID ./coverage.info -install: - - ci/install.sh - -script: - - ci/build.sh && ci/test.sh + - name: "GCC Unit Tests" + os: linux + dist: jammy + env: + - CC=gcc-12 + - CXX=g++-12 + before_script: + - cmake -DCMAKE_BUILD_TYPE=Release . + - cmake --build . --config Release --parallel 3 + script: + - cd tests + - ctest -j3 --output-on-failure -after_success: - - ci/after_success.sh + - name: "Clang Unit Tests and Formatting" + os: linux + dist: jammy + env: + - CC=clang + - CXX=clang++ + before_script: + - cmake -DCMAKE_BUILD_TYPE=Release . + - cmake --build . 
--config Release --parallel 3 + script: + - cmake --build . --config Release --parallel 3 --target format.check + - cd tests + - ctest -j3 --output-on-failure notifications: slack: - secure: Fx5AGddmmeZqZBUbRdI2/zLaiWy7H+afXEWLYSEnu8+bf6sHCOTPvDsHbgPRZikAdSCa0GaxDzqVeFcOFYgX0+XPQgpY4Q2mBwrNViajxArvJXghA/4DfAAUx7knr22RbZjUsEnp2wEGU6Rp9UHo3Y+IOwXR/2v6BnQ1ntMzS1Np1X4HegHXxArrfmbx6JU+43CXajNJGcx9Bhdwnp6puCRQ+gGUz4+tSwy/0UTX4APTGKVqxuhOqRlpVkbCh1g4l6wDJ5gBP0AXM3d2BTmQYdgL6/BwqJG4REjSp6GSaoEOa22cnPtbmXnWY7/npb6uVRVo/WFvJJt6SPGOb7QJn03j/bEK1HCCIdJZR7MwxpAt7vAkihBkkb65izrKXAF05Y5dgZvJsRUCQNr3aDarPPJzU0hLV2g1o1QinuJayUtbXv3/YiP1aqM/zKe4v+J17El6X+2z+Pwvs53brOMpTIfPL/+vCNHreTKHYD90SfU44R/ObwhqS0F+pmbLwarcnwoA0ADBuorGg2HMBEU+GrTxTQiNj0p2SO+2FTFsp4AJ4LhEDsjlrvAQ2TFH3oDIpoR+IN1Pcoi4oJQ7WvFonNVj0Ndw5uJDzPMqMPy3DKNNGtUWaGy+kTHN12kWaid7HTqUDS6hye9Lq4msGwQXdmRyNjB6BENdX2Pt8L0LtEU= + secure: dO+bR69Uu8PBZcFn2Et0ES66sZfxbOytMJBPLagMnp4yfjmeowKgjvzYaGu/LcW8PEHL0EWN+ZJqa3DIq/y6LMQP8E3WcyKZa8U6i48j8+RHgfbfd2PyXuGO6LmLdGVkndxX0MA3oQaKjk1iPwvjdB883Pm6hEFg97+ia0rD+RrfeiCJQFvDGn8QyLHMQQEL1JuTedZffTtBIgep7LPC7CEzaejx8ic/l6/FrqjCEIpTw82pPm2r43wNi+d4X5ieXzk09sy9lzO3MnBXrFVZmFu4hpg1Z1m+EyWUCtYuRGtN78Cc5DBVQq8l50PQge2OKLP5OBCOP97VZ78Z4MgkubpqDItABYEeO+O+jViGj/yXQq2PrOMscrSzvHsAOEtCpJkPftKj7cnCyjLBfAJ3S4zB4id3uRoV+vgmMRJAqDXEuYWssg5V+Nwy06W/3ObWp06p5eR1t8naGksy9X+/pYMz9CTdgL+LoGOhYXCA5yw1ajh2cS63S6GxRcP8GSQT0w2VzK5EFL2KmKWfqgxZ1OPFuO8R6nnf/iwwxI90r1FSzP4x+0MSBh5pN4Ic75/CFQVaYF+rgfmb85aFyxB209GQTrDwW9qKTvAta948xhEE31Mht6lSey8zhFNLNGgHKDflxciMu4DA1G6IuI5YBZgYFJgZbZ5IUrOndtMAZ+M= on_success: never diff --git a/CMakeLists.txt b/CMakeLists.txt index 9649297..fbf1cf5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,169 +1,43 @@ -cmake_minimum_required(VERSION 3.10.2) +cmake_minimum_required(VERSION 3.19.0) -find_program(CCACHE_PROGRAM ccache) -if(CCACHE_PROGRAM) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_PROGRAM}") - set(CMAKE_XCODE_ATTRIBUTE_CC 
"${CMAKE_SOURCE_DIR}/ci/ccache_clang") - set(CMAKE_XCODE_ATTRIBUTE_CXX "${CMAKE_SOURCE_DIR}/ci/ccache_clang++") - set(CMAKE_XCODE_ATTRIBUTE_LD "${CMAKE_SOURCE_DIR}/ci/ccache_clang") - set(CMAKE_XCODE_ATTRIBUTE_LDPLUSPLUS "${CMAKE_SOURCE_DIR}/ci/ccache_clang++") -endif() +cmake_policy(VERSION 3.19.0...3.27.4) -option(HUNTER_RUN_UPLOAD "Upload Hunter packages to binary cache server" OFF) +include(FetchContent) +FetchContent_Declare( + koinos_cmake + GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git + GIT_TAG 0c8433a118eb4cd5a86bb7ac4708a38db8166801) +FetchContent_MakeAvailable(koinos_cmake) -set( - HUNTER_CACHE_SERVERS - "https://github.com/koinos/hunter-cache" - CACHE - STRING - "Default cache server" -) - -set( - HUNTER_PASSWORDS_PATH - "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/passwords.cmake" - CACHE - FILEPATH - "Hunter passwords" -) - -include("cmake/HunterGate.cmake") - -HunterGate( - URL "https://github.com/cpp-pm/hunter/archive/v0.24.14.tar.gz" - SHA1 "00901c19eefc02d24b16705b5f5a2b4f093a73fb" - LOCAL -) - -project(koinos_state_db VERSION 1.1.0 LANGUAGES CXX C) - -# -# CONFIGURATION -# -include(GNUInstallDirs) - -set(KOINOS_LIB_TARGET_NAME ${PROJECT_NAME}) -set(KOINOS_LIB_TARGET_SHORT_NAME "state_db") -set(KOINOS_LIB_CONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" CACHE INTERNAL "") -set(KOINOS_LIB_INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}") -set(KOINOS_LIB_TARGETS_EXPORT_NAME "${PROJECT_NAME}Targets") -set(KOINOS_LIB_CMAKE_CONFIG_TEMPLATE "cmake/config.cmake.in") -set(KOINOS_LIB_CMAKE_CONFIG_DIR "${CMAKE_CURRENT_BINARY_DIR}") -set(KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}ConfigVersion.cmake") -set(KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Config.cmake") -set(KOINOS_LIB_CMAKE_PROJECT_TARGETS_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Targets.cmake") -set(KOINOS_LIB_PKGCONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/pkgconfig") - -if 
(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0") - cmake_policy(SET CMP0074 NEW) -endif () - -option(BUILD_TESTS "Build Tests" ON) -option(FORCE_COLORED_OUTPUT "Always produce ANSI-colored output (GNU/Clang only)." OFF) - -# This is to force color output when using ccache with Unix Makefiles -if(${FORCE_COLORED_OUTPUT}) - if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" ) - add_compile_options (-fdiagnostics-color=always) - elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" ) - add_compile_options (-fcolor-diagnostics) - endif () -endif () +include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake") -list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") +project(koinos_state_db + VERSION 1.1.0 + DESCRIPTION "The Koinos statedb library" + LANGUAGES CXX C) -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_VISIBILITY_PRESET hidden) -set(Boost_NO_BOOST_CMAKE ON) +koinos_define_version() -if(COVERAGE) - include(CodeCoverage) - append_coverage_compiler_flags() - setup_target_for_coverage_lcov( - NAME coverage - LCOV_ARGS "--quiet" "--no-external" - EXECUTABLE koinos_state_db_tests - EXCLUDE "libraries/vendor/*" "build/generated/*") -endif() - -hunter_add_package(Boost COMPONENTS test exception log) -hunter_add_package(ethash) -hunter_add_package(libsecp256k1-vrf) -hunter_add_package(nlohmann_json) -hunter_add_package(OpenSSL) -hunter_add_package(rocksdb) -hunter_add_package(yaml-cpp) -hunter_add_package(Protobuf) -hunter_add_package(gRPC) -hunter_add_package(abseil) -hunter_add_package(re2) -hunter_add_package(c-ares) -hunter_add_package(ZLIB) - -hunter_add_package(koinos_util) -hunter_add_package(koinos_log) -hunter_add_package(koinos_exception) -hunter_add_package(koinos_proto) -hunter_add_package(koinos_crypto) - -find_package(Boost CONFIG REQUIRED COMPONENTS program_options log log_setup exception) -find_package(RocksDB CONFIG REQUIRED) -find_package(Protobuf 
CONFIG REQUIRED) -find_package(ethash CONFIG REQUIRED) -find_package(libsecp256k1-vrf CONFIG REQUIRED) -find_package(nlohmann_json CONFIG REQUIRED) -find_package(OpenSSL REQUIRED) -find_package(yaml-cpp CONFIG REQUIRED) -find_package(gRPC CONFIG REQUIRED) -find_package(absl CONFIG REQUIRED) -find_package(re2 CONFIG REQUIRED) -find_package(c-ares CONFIG REQUIRED) -find_package(ZLIB CONFIG REQUIRED) - -find_package(koinos_util CONFIG REQUIRED) -find_package(koinos_log CONFIG REQUIRED) -find_package(koinos_exception CONFIG REQUIRED) -find_package(koinos_proto CONFIG REQUIRED) -find_package(koinos_crypto CONFIG REQUIRED) - - -add_subdirectory(libraries) -if (BUILD_TESTS) - add_subdirectory(tests) -endif() - -# Install a pkg-config file, so other tools can find this. -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkg-config.pc.in" - "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc" +koinos_add_package(Boost CONFIG REQUIRED + ADD_COMPONENTS log exception program_options test + FIND_COMPONENTS log log_setup exception program_options ) -# -# INSTALL -# install header files, generate and install cmake config files for find_package() -# -include(CMakePackageConfigHelpers) - -configure_file( - "cmake/${PROJECT_NAME}ConfigVersion.cmake.in" - ${KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE} - @ONLY -) -configure_file( - ${KOINOS_LIB_CMAKE_CONFIG_TEMPLATE} - ${KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE} - @ONLY -) +koinos_add_package(rocksdb NAME RocksDB CONFIG REQUIRED) +koinos_add_package(Protobuf CONFIG REQUIRED) +koinos_add_package(ethash CONFIG REQUIRED) +koinos_add_package(libsecp256k1-vrf CONFIG REQUIRED) +koinos_add_package(nlohmann_json CONFIG REQUIRED) +koinos_add_package(OpenSSL REQUIRED) +koinos_add_package(yaml-cpp CONFIG REQUIRED) +koinos_add_package(gRPC CONFIG REQUIRED) -install( - FILES ${KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE} ${KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE} - DESTINATION ${KOINOS_LIB_CONFIG_INSTALL_DIR} -) +koinos_add_package(koinos_proto CONFIG REQUIRED) 
+koinos_add_package(koinos_exception CONFIG REQUIRED) +koinos_add_package(koinos_log CONFIG REQUIRED) +koinos_add_package(koinos_crypto CONFIG REQUIRED) +koinos_add_package(koinos_util CONFIG REQUIRED) -install( - FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc" - DESTINATION ${KOINOS_LIB_PKGCONFIG_INSTALL_DIR} -) +add_subdirectory(src) +add_subdirectory(tests) diff --git a/LICENSE.md b/LICENSE.md index ba4f52c..945eead 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -4,6 +4,4 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -The Software is not used with any blockchain or blockchain fork that is not recognized by Koinos Group, LLC in writing prior to Dec 31, 2022. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index a9c4c82..94196af 100644 --- a/README.md +++ b/README.md @@ -1 +1,79 @@ -# koinos-repo-template +# Koinos StateDB Cpp + +This library implements StateDB, a fork aware persistent database, for the Koinos Blockchain Framework. + +### Project Structure + +This project's structure follows the [Pitchfork](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/vector-of-bool/pitchfork/develop/data/spec.bs) specification. + +``` +├── build/ # An ephemeral directory for building the project. Not checked in, but excluded via .gitignore. +├── include/ # Contains all public headers for the Koinos StateDB. 
+├── src/ # Contains all source code and private headers for Koinos StateDB. +└── tests/ # Contains tests for Koinos StateDB. +``` + +### Building + +Koinos StateDB's build process is configured using CMake. Additionally, all dependencies are managed through Hunter, a CMake-driven package manager for C/C++. This means that all dependencies are downloaded and built during configuration rather than relying on system installed libraries. + +``` +mkdir build +cd build +cmake -D CMAKE_BUILD_TYPE=Release .. +cmake --build . --config Release --parallel +``` + +You can optionally run static analysis with Clang-Tidy during the build process. Static analysis is checked in CI and is required to pass before merging pull requests. + +``` +cmake -D CMAKE_BUILD_TYPE=Debug -D STATIC_ANALYSIS=ON .. +``` + +### Testing + +Tests are built by default as target `state_db_tests`. You can build them specifically with: + +``` +cmake --build . --config Release --parallel --target state_db_tests +``` + +Tests can be invoked from the tests directory within the build directory. + +``` +cd tests +./state_db_tests +``` + +Tests can also be run in parallel using CTest. + +``` +cd tests +ctest -j +``` + +You can also generate a coverage report. + +``` +cmake -D CMAKE_BUILD_TYPE=Debug -D COVERAGE=ON .. +cmake --build . --config Debug --parallel 3 --target coverage +``` + +You can run tests in different sanitizer profiles. Those profiles are None (Default), Address, Stack, and Thread. Currently, these are only known to work with clang, but may work with gcc with additional environment configuration. + +``` +cmake -D CMAKE_BUILD_TYPE=Debug -D SANITIZER=Address .. +cmake --build . --config Debug --parallel --target state_db_tests +cd tests +ctest -j +``` + +### Formatting + +Formatting of the source code is enforced by ClangFormat. If ClangFormat is installed, build targets will be automatically generated. 
You can review the library's code style by uploading the included `.clang-format` to https://clang-format-configurator.site/. + +You can build `format.check` to check formatting and `format.fix` to attempt to automatically fix formatting. It is recommended to check and manually fix formatting as automatic formatting can unintentionally change code. + +### Contributing + +As an open source project, contributions are welcome and appreciated. Before contributing, please read our [Contribution Guidelines](CONTRIBUTING.md). diff --git a/ci/after_success.sh b/ci/after_success.sh deleted file mode 100755 index c03c7ab..0000000 --- a/ci/after_success.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -if [ "$RUN_TYPE" = "coverage" ]; then - coveralls-lcov --repo-token "$COVERALLS_REPO_TOKEN" --service-name travis-pro --service-job-id "$TRAVIS_JOB_ID" ./build/coverage.info -fi - diff --git a/ci/build.sh b/ci/build.sh deleted file mode 100755 index f503915..0000000 --- a/ci/build.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -e -set -x - -mkdir build -cd build - -if [ "$RUN_TYPE" = "test" ]; then - cmake -DCMAKE_BUILD_TYPE=Release .. - cmake --build . --config Release --parallel 3 -elif [ "$RUN_TYPE" = "coverage" ]; then - cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. - cmake --build . 
--config Debug --parallel 3 --target coverage -fi diff --git a/ci/ccache_clang b/ci/ccache_clang deleted file mode 100755 index 7aef1a2..0000000 --- a/ci/ccache_clang +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec ccache clang "$@" diff --git a/ci/ccache_clang++ b/ci/ccache_clang++ deleted file mode 100755 index 49b6341..0000000 --- a/ci/ccache_clang++ +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec ccache clang++ "$@" diff --git a/ci/install.sh b/ci/install.sh deleted file mode 100755 index 4024a3f..0000000 --- a/ci/install.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -sudo apt-get install -yq --allow-downgrades libc6=2.31-0ubuntu9.2 libc6-dev=2.31-0ubuntu9.2 - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --allow-downgrades --allow-remove-essential --allow-change-held-packages install clang-11 llvm-11 -o Debug::pkgProblemResolver=yes - -if [ "$RUN_TYPE" = "coverage" ]; then - sudo apt-get install -y lcov ruby valgrind - sudo gem install coveralls-lcov -fi - -pip3 install --user dataclasses-json Jinja2 importlib_resources pluginbase gitpython diff --git a/ci/test.sh b/ci/test.sh deleted file mode 100755 index 60cf747..0000000 --- a/ci/test.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -e -set -x - -cd $(dirname "$0")/../build/tests -if [ "$RUN_TYPE" = "test" ]; then - exec ctest -j3 --output-on-failure -elif [ "$RUN_TYPE" = "coverage" ]; then - exec valgrind --error-exitcode=1 --leak-check=yes ./koinos_state_db_tests -fi diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake deleted file mode 100644 index 4627c42..0000000 --- a/cmake/CodeCoverage.cmake +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright (c) 2012 - 2017, Lars Bilke -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# 1. 
Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# CHANGES: -# -# 2012-01-31, Lars Bilke -# - Enable Code Coverage -# -# 2013-09-17, Joakim Söderberg -# - Added support for Clang. -# - Some additional usage instructions. -# -# 2016-02-03, Lars Bilke -# - Refactored functions to use named parameters -# -# 2017-06-02, Lars Bilke -# - Merged with modified version from github.com/ufz/ogs -# -# 2019-05-06, Anatolii Kurotych -# - Remove unnecessary --coverage flag -# -# 2019-12-13, FeRD (Frank Dana) -# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor -# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments. 
-# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY -# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list -# - Set lcov basedir with -b argument -# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be -# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().) -# - Delete output dir, .info file on 'make clean' -# - Remove Python detection, since version mismatches will break gcovr -# - Minor cleanup (lowercase function names, update examples...) -# -# 2019-12-19, FeRD (Frank Dana) -# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets -# -# 2020-01-19, Bob Apthorpe -# - Added gfortran support -# -# 2020-02-17, FeRD (Frank Dana) -# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters -# in EXCLUDEs, and remove manual escaping from gcovr targets -# -# USAGE: -# -# 1. Copy this file into your cmake modules path. -# -# 2. Add the following line to your CMakeLists.txt (best inside an if-condition -# using a CMake option() to enable it just optionally): -# include(CodeCoverage) -# -# 3. Append necessary compiler flags: -# append_coverage_compiler_flags() -# -# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og -# -# 4. If you need to exclude additional directories from the report, specify them -# using full paths in the COVERAGE_EXCLUDES variable before calling -# setup_target_for_coverage_*(). -# Example: -# set(COVERAGE_EXCLUDES -# '${PROJECT_SOURCE_DIR}/src/dir1/*' -# '/path/to/my/src/dir2/*') -# Or, use the EXCLUDE argument to setup_target_for_coverage_*(). 
-# Example: -# setup_target_for_coverage_lcov( -# NAME coverage -# EXECUTABLE testrunner -# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*") -# -# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set -# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR) -# Example: -# set(COVERAGE_EXCLUDES "dir1/*") -# setup_target_for_coverage_gcovr_html( -# NAME coverage -# EXECUTABLE testrunner -# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src" -# EXCLUDE "dir2/*") -# -# 5. Use the functions described below to create a custom make target which -# runs your test executable and produces a code coverage report. -# -# 6. Build a Debug build: -# cmake -DCMAKE_BUILD_TYPE=Debug .. -# make -# make my_coverage_target -# - -include(CMakeParseArguments) - -# Check prereqs -find_program( GCOV_PATH gcov ) -find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl) -find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat ) -find_program( LLVM_COV_PATH llvm-cov ) -find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test) -find_program( CPPFILT_PATH NAMES c++filt ) - -if(NOT GCOV_PATH) - message(FATAL_ERROR "gcov not found! Aborting...") -endif() # NOT GCOV_PATH - -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") - if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3) - message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...") - endif() -elseif(NOT CMAKE_COMPILER_IS_GNUCXX) - if("${CMAKE_Fortran_COMPILER_ID}" MATCHES "[Ff]lang") - # Do nothing; exit conditional without error if true - elseif("${CMAKE_Fortran_COMPILER_ID}" MATCHES "GNU") - # Do nothing; exit conditional without error if true - else() - message(FATAL_ERROR "Compiler is not GNU gcc! 
Aborting...") - endif() -endif() - -set(COVERAGE_COMPILER_FLAGS "-g -fprofile-arcs -ftest-coverage" - CACHE INTERNAL "") - -set(CMAKE_Fortran_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the Fortran compiler during coverage builds." - FORCE ) -set(CMAKE_CXX_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C++ compiler during coverage builds." - FORCE ) -set(CMAKE_C_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C compiler during coverage builds." - FORCE ) -set(CMAKE_EXE_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE ) -set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." - FORCE ) -mark_as_advanced( - CMAKE_Fortran_FLAGS_COVERAGE - CMAKE_CXX_FLAGS_COVERAGE - CMAKE_C_FLAGS_COVERAGE - CMAKE_EXE_LINKER_FLAGS_COVERAGE - CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) - -if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") -endif() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" - -if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") - link_libraries(gcov) -endif() - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. 
-# -# setup_target_for_coverage_lcov( -# NAME testrunner_coverage # New target name -# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES testrunner # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# NO_DEMANGLE # Don't demangle C++ symbols -# # even if c++filt is found -# ) -function(setup_target_for_coverage_lcov) - - set(options NO_DEMANGLE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES LCOV_ARGS GENHTML_ARGS) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT LCOV_PATH) - message(FATAL_ERROR "lcov not found! Aborting...") - endif() # NOT LCOV_PATH - - # Needed for gcov_for_clang.sh - if(NOT LLVM_COV_PATH) - message(FATAL_ERROR "llvm-cov not found! Aborting...") - endif() # NOT LLVM_COV_PATH - - if(NOT GENHTML_PATH) - message(FATAL_ERROR "genhtml not found! 
Aborting...") - endif() # NOT GENHTML_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(LCOV_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_LCOV_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND LCOV_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES LCOV_EXCLUDES) - - # Conditional arguments - if(CPPFILT_PATH AND NOT ${Coverage_NO_DEMANGLE}) - set(GENHTML_EXTRA_ARGS "--demangle-cpp") - endif() - - set(GCOV_PATH ${CMAKE_SOURCE_DIR}/cmake/gcov_for_clang.sh) - - # Setup target - add_custom_target(${Coverage_NAME} - - # Cleanup lcov - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -directory . -b ${BASEDIR} --zerocounters - # Create baseline to make sure untouched files show up in the report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -c -i -d . -b ${BASEDIR} -o ${Coverage_NAME}.base - - # Run tests - COMMAND ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Capturing lcov counters and generating report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --directory . 
-b ${BASEDIR} --capture --output-file ${Coverage_NAME}.capture - # add baseline counters - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -a ${Coverage_NAME}.base -a ${Coverage_NAME}.capture --output-file ${Coverage_NAME}.total - # filter collected data to final coverage report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --remove ${Coverage_NAME}.total ${LCOV_EXCLUDES} --output-file ${Coverage_NAME}.info - - # Generate HTML output - COMMAND ${GENHTML_PATH} ${GENHTML_EXTRA_ARGS} ${Coverage_GENHTML_ARGS} -o ${Coverage_NAME} ${Coverage_NAME}.info - - # Set output files as GENERATED (will be removed on 'make clean') - BYPRODUCTS - ${Coverage_NAME}.base - ${Coverage_NAME}.capture - ${Coverage_NAME}.total - ${Coverage_NAME}.info - ${Coverage_NAME} # report directory - - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." - ) - - # Show where to find the lcov info report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." - ) - -endfunction() # setup_target_for_coverage_lcov - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. 
-# -# setup_target_for_coverage_gcovr_xml( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# ) -function(setup_target_for_coverage_gcovr_xml) - - set(options NONE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! Aborting...") - endif() # NOT GCOVR_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES GCOVR_EXCLUDES) - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDE_ARGS "") - foreach(EXCLUDE ${GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDE_ARGS "-e") - list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Running gcovr - COMMAND ${GCOVR_PATH} --xml - -r ${BASEDIR} ${GCOVR_EXCLUDE_ARGS} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}.xml - BYPRODUCTS ${Coverage_NAME}.xml - WORKING_DIRECTORY 
${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Running gcovr to produce Cobertura code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml." - ) -endfunction() # setup_target_for_coverage_gcovr_xml - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# setup_target_for_coverage_gcovr_html( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# ) -function(setup_target_for_coverage_gcovr_html) - - set(options NONE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! 
Aborting...") - endif() # NOT GCOVR_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES GCOVR_EXCLUDES) - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDE_ARGS "") - foreach(EXCLUDE ${GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDE_ARGS "-e") - list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Create folder - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/${Coverage_NAME} - - # Running gcovr - COMMAND ${GCOVR_PATH} --html --html-details - -r ${BASEDIR} ${GCOVR_EXCLUDE_ARGS} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}/index.html - - BYPRODUCTS ${PROJECT_BINARY_DIR}/${Coverage_NAME} # report directory - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Running gcovr to produce HTML code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." 
- ) - -endfunction() # setup_target_for_coverage_gcovr_html - -function(append_coverage_compiler_flags) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") -endfunction() # append_coverage_compiler_flags diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake deleted file mode 100644 index 00c3753..0000000 --- a/cmake/Hunter/config.cmake +++ /dev/null @@ -1,128 +0,0 @@ -hunter_config(Boost - VERSION ${HUNTER_Boost_VERSION} - CMAKE_ARGS - USE_CONFIG_FROM_BOOST=ON - Boost_USE_STATIC_LIBS=ON - Boost_NO_BOOST_CMAKE=ON -) - -hunter_config(Protobuf - URL "https://github.com/koinos/protobuf/archive/e1b1477875a8b022903b548eb144f2c7bf4d9561.tar.gz" - SHA1 "5796707a98eec15ffb3ad86ff50e8eec5fa65e68" - CMAKE_ARGS - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(rocksdb - URL "https://github.com/facebook/rocksdb/archive/v6.15.2.tar.gz" - SHA1 "daf7ef3946fd39c910acaaa57789af8515b39251" - CMAKE_ARGS - WITH_TESTS=OFF - WITH_TOOLS=OFF - WITH_JNI=OFF - WITH_BENCHMARK_TOOLS=OFF - WITH_CORE_TOOLS=OFF - WITH_GFLAGS=OFF - PORTABLE=ON - FAIL_ON_WARNINGS=OFF - ROCKSDB_BUILD_SHARED=OFF - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(yaml-cpp - VERSION "0.6.3" - CMAKE_ARGS - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(gRPC - VERSION 1.31.0-p0 - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(abseil - VERSION ${HUNTER_abseil_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(re2 - VERSION 
${HUNTER_re2_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(c-ares - VERSION ${HUNTER_c-ares_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(ZLIB - VERSION ${HUNTER_ZLIB_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(libsecp256k1 - URL "https://github.com/soramitsu/soramitsu-libsecp256k1/archive/c7630e1bac638c0f16ee66d4dce7b5c49eecbaa5.tar.gz" - SHA1 "0534fa8948f279b26fd102905215a56f0ad7fa18" -) - -hunter_config(libsecp256k1-vrf - URL "https://github.com/koinos/secp256k1-vrf/archive/db479e83be5685f652a9bafefaef77246fdf3bbe.tar.gz" - SHA1 "62df75e061c4afd6f0548f1e8267cc3da6abee15" -) - -hunter_config(ethash - URL "https://github.com/chfast/ethash/archive/refs/tags/v0.8.0.tar.gz" - SHA1 "41fd440f70b6a8dfc3fd29b20f471dcbd1345ad0" - CMAKE_ARGS - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(koinos_log - URL "https://github.com/koinos/koinos-log-cpp/archive/ca1fdcbb26ee2d9c2c45f8692747b3f7a5235025.tar.gz" - SHA1 "3eb809598fc1812e217d867e583abe69f4804e38" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_util - URL "https://github.com/koinos/koinos-util-cpp/archive/dd3e15f0b08a99082b736b901bb78c0af4ed1982.tar.gz" - SHA1 "e5b475c10885dc5426c16a3e1122267b4a1668e1" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_proto - URL "https://github.com/koinos/koinos-proto-cpp/archive/04d6a7f0cf8d2eeaddd105441c398eaff8a1a519.tar.gz" - SHA1 "6d168b017b2545b03b8cd3ea4b1590b471da78e7" -) - -hunter_config(koinos_exception - URL "https://github.com/koinos/koinos-exception-cpp/archive/5501569e8bec1c97ddc1257e25ec1149bc2b50e9.tar.gz" - SHA1 "5c6966904fa5d28b7ea86194ef2fb4ce68fbdb59" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_crypto - URL 
"https://github.com/koinos/koinos-crypto-cpp/archive/2f91acfd683b824439b9844095cdc2e89f371037.tar.gz" - SHA1 "88a3d6f6a6d029aa287f85acb4a878dc844818b1" -) diff --git a/cmake/Hunter/passwords.cmake b/cmake/Hunter/passwords.cmake deleted file mode 100644 index 357202e..0000000 --- a/cmake/Hunter/passwords.cmake +++ /dev/null @@ -1,8 +0,0 @@ -# cmake/Hunter/passwords.cmake - -hunter_upload_password( - REPO_OWNER "koinos" - REPO "hunter-cache" - USERNAME "koinos-ci" - PASSWORD "$ENV{GITHUB_USER_PASSWORD}" -) diff --git a/cmake/HunterGate.cmake b/cmake/HunterGate.cmake deleted file mode 100644 index 6d9cc24..0000000 --- a/cmake/HunterGate.cmake +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) 2013-2019, Ruslan Baratov -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This is a gate file to Hunter package manager. -# Include this file using `include` command and add package you need, example: -# -# cmake_minimum_required(VERSION 3.2) -# -# include("cmake/HunterGate.cmake") -# HunterGate( -# URL "https://github.com/path/to/hunter/archive.tar.gz" -# SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" -# ) -# -# project(MyProject) -# -# hunter_add_package(Foo) -# hunter_add_package(Boo COMPONENTS Bar Baz) -# -# Projects: -# * https://github.com/hunter-packages/gate/ -# * https://github.com/ruslo/hunter - -option(HUNTER_ENABLED "Enable Hunter package manager support" ON) - -if(HUNTER_ENABLED) - if(CMAKE_VERSION VERSION_LESS "3.2") - message( - FATAL_ERROR - "At least CMake version 3.2 required for Hunter dependency management." - " Update CMake or set HUNTER_ENABLED to OFF." 
- ) - endif() -endif() - -include(CMakeParseArguments) # cmake_parse_arguments - -option(HUNTER_STATUS_PRINT "Print working status" ON) -option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) -option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) - -set(HUNTER_ERROR_PAGE "https://docs.hunter.sh/en/latest/reference/errors") - -function(hunter_gate_status_print) - if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) - foreach(print_message ${ARGV}) - message(STATUS "[hunter] ${print_message}") - endforeach() - endif() -endfunction() - -function(hunter_gate_status_debug) - if(HUNTER_STATUS_DEBUG) - foreach(print_message ${ARGV}) - string(TIMESTAMP timestamp) - message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") - endforeach() - endif() -endfunction() - -function(hunter_gate_error_page error_page) - message("------------------------------ ERROR ------------------------------") - message(" ${HUNTER_ERROR_PAGE}/${error_page}.html") - message("-------------------------------------------------------------------") - message("") - message(FATAL_ERROR "") -endfunction() - -function(hunter_gate_internal_error) - message("") - foreach(print_message ${ARGV}) - message("[hunter ** INTERNAL **] ${print_message}") - endforeach() - message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") - message("") - hunter_gate_error_page("error.internal") -endfunction() - -function(hunter_gate_fatal_error) - cmake_parse_arguments(hunter "" "ERROR_PAGE" "" "${ARGV}") - if("${hunter_ERROR_PAGE}" STREQUAL "") - hunter_gate_internal_error("Expected ERROR_PAGE") - endif() - message("") - foreach(x ${hunter_UNPARSED_ARGUMENTS}) - message("[hunter ** FATAL ERROR **] ${x}") - endforeach() - message("[hunter ** FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") - message("") - hunter_gate_error_page("${hunter_ERROR_PAGE}") -endfunction() - -function(hunter_gate_user_error) - hunter_gate_fatal_error(${ARGV} ERROR_PAGE 
"error.incorrect.input.data") -endfunction() - -function(hunter_gate_self root version sha1 result) - string(COMPARE EQUAL "${root}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("root is empty") - endif() - - string(COMPARE EQUAL "${version}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("version is empty") - endif() - - string(COMPARE EQUAL "${sha1}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("sha1 is empty") - endif() - - string(SUBSTRING "${sha1}" 0 7 archive_id) - - if(EXISTS "${root}/cmake/Hunter") - set(hunter_self "${root}") - else() - set( - hunter_self - "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" - ) - endif() - - set("${result}" "${hunter_self}" PARENT_SCOPE) -endfunction() - -# Set HUNTER_GATE_ROOT cmake variable to suitable value. -function(hunter_gate_detect_root) - # Check CMake variable - string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) - if(not_empty) - set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") - return() - endif() - - # Check environment variable - string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) - if(not_empty) - set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") - return() - endif() - - # Check HOME environment variable - string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) - if(result) - set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") - return() - endif() - - # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) - if(WIN32) - string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) - if(result) - set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug( - "HUNTER_ROOT set using SYSTEMDRIVE environment variable" - ) - return() - endif() - - string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) - 
if(result) - set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug( - "HUNTER_ROOT set using USERPROFILE environment variable" - ) - return() - endif() - endif() - - hunter_gate_fatal_error( - "Can't detect HUNTER_ROOT" - ERROR_PAGE "error.detect.hunter.root" - ) -endfunction() - -function(hunter_gate_download dir) - string( - COMPARE - NOTEQUAL - "$ENV{HUNTER_DISABLE_AUTOINSTALL}" - "" - disable_autoinstall - ) - if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) - hunter_gate_fatal_error( - "Hunter not found in '${dir}'" - "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" - "Settings:" - " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" - " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" - ERROR_PAGE "error.run.install" - ) - endif() - string(COMPARE EQUAL "${dir}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("Empty 'dir' argument") - endif() - - string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") - endif() - - string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("HUNTER_GATE_URL empty") - endif() - - set(done_location "${dir}/DONE") - set(sha1_location "${dir}/SHA1") - - set(build_dir "${dir}/Build") - set(cmakelists "${dir}/CMakeLists.txt") - - hunter_gate_status_debug("Locking directory: ${dir}") - file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) - hunter_gate_status_debug("Lock done") - - if(EXISTS "${done_location}") - # while waiting for lock other instance can do all the job - hunter_gate_status_debug("File '${done_location}' found, skip install") - return() - endif() - - file(REMOVE_RECURSE "${build_dir}") - file(REMOVE_RECURSE "${cmakelists}") - - file(MAKE_DIRECTORY "${build_dir}") # check directory permissions - - # Disabling languages speeds up a little bit, reduces noise in the output - # and avoids path too long windows error - file( - WRITE - "${cmakelists}" - "cmake_minimum_required(VERSION 3.2)\n" - 
"project(HunterDownload LANGUAGES NONE)\n" - "include(ExternalProject)\n" - "ExternalProject_Add(\n" - " Hunter\n" - " URL\n" - " \"${HUNTER_GATE_URL}\"\n" - " URL_HASH\n" - " SHA1=${HUNTER_GATE_SHA1}\n" - " DOWNLOAD_DIR\n" - " \"${dir}\"\n" - " TLS_VERIFY\n" - " ${HUNTER_TLS_VERIFY}\n" - " SOURCE_DIR\n" - " \"${dir}/Unpacked\"\n" - " CONFIGURE_COMMAND\n" - " \"\"\n" - " BUILD_COMMAND\n" - " \"\"\n" - " INSTALL_COMMAND\n" - " \"\"\n" - ")\n" - ) - - if(HUNTER_STATUS_DEBUG) - set(logging_params "") - else() - set(logging_params OUTPUT_QUIET) - endif() - - hunter_gate_status_debug("Run generate") - - # Need to add toolchain file too. - # Otherwise on Visual Studio + MDD this will fail with error: - # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" - if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") - get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) - set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") - else() - # 'toolchain_arg' can't be empty - set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") - endif() - - string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) - if(no_make) - set(make_arg "") - else() - # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM - set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") - endif() - - execute_process( - COMMAND - "${CMAKE_COMMAND}" - "-H${dir}" - "-B${build_dir}" - "-G${CMAKE_GENERATOR}" - "${toolchain_arg}" - ${make_arg} - WORKING_DIRECTORY "${dir}" - RESULT_VARIABLE download_result - ${logging_params} - ) - - if(NOT download_result EQUAL 0) - hunter_gate_internal_error( - "Configure project failed." 
- "To reproduce the error run: ${CMAKE_COMMAND} -H${dir} -B${build_dir} -G${CMAKE_GENERATOR} ${toolchain_arg} ${make_arg}" - "In directory ${dir}" - ) - endif() - - hunter_gate_status_print( - "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" - " ${HUNTER_GATE_URL}" - " -> ${dir}" - ) - execute_process( - COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" - WORKING_DIRECTORY "${dir}" - RESULT_VARIABLE download_result - ${logging_params} - ) - - if(NOT download_result EQUAL 0) - hunter_gate_internal_error("Build project failed") - endif() - - file(REMOVE_RECURSE "${build_dir}") - file(REMOVE_RECURSE "${cmakelists}") - - file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") - file(WRITE "${done_location}" "DONE") - - hunter_gate_status_debug("Finished") -endfunction() - -# Must be a macro so master file 'cmake/Hunter' can -# apply all variables easily just by 'include' command -# (otherwise PARENT_SCOPE magic needed) -macro(HunterGate) - if(HUNTER_GATE_DONE) - # variable HUNTER_GATE_DONE set explicitly for external project - # (see `hunter_download`) - set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) - endif() - - # First HunterGate command will init Hunter, others will be ignored - get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) - - if(NOT HUNTER_ENABLED) - # Empty function to avoid error "unknown function" - function(hunter_add_package) - endfunction() - - set( - _hunter_gate_disabled_mode_dir - "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" - ) - if(EXISTS "${_hunter_gate_disabled_mode_dir}") - hunter_gate_status_debug( - "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" - ) - list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") - endif() - elseif(_hunter_gate_done) - hunter_gate_status_debug("Secondary HunterGate (use old settings)") - hunter_gate_self( - "${HUNTER_CACHED_ROOT}" - "${HUNTER_VERSION}" - "${HUNTER_SHA1}" - _hunter_self - ) - include("${_hunter_self}/cmake/Hunter") - else() - 
set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}") - - string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" _have_project_name) - if(_have_project_name) - hunter_gate_fatal_error( - "Please set HunterGate *before* 'project' command. " - "Detected project: ${PROJECT_NAME}" - ERROR_PAGE "error.huntergate.before.project" - ) - endif() - - cmake_parse_arguments( - HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} - ) - - string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) - string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) - string( - COMPARE - NOTEQUAL - "${HUNTER_GATE_UNPARSED_ARGUMENTS}" - "" - _have_unparsed - ) - string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) - string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) - - if(_have_unparsed) - hunter_gate_user_error( - "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" - ) - endif() - if(_empty_sha1) - hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") - endif() - if(_empty_url) - hunter_gate_user_error("URL suboption of HunterGate is mandatory") - endif() - if(_have_global) - if(HUNTER_GATE_LOCAL) - hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") - endif() - if(_have_filepath) - hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") - endif() - endif() - if(HUNTER_GATE_LOCAL) - if(_have_global) - hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") - endif() - if(_have_filepath) - hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") - endif() - endif() - if(_have_filepath) - if(_have_global) - hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") - endif() - if(HUNTER_GATE_LOCAL) - hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") - endif() - endif() - - hunter_gate_detect_root() # set HUNTER_GATE_ROOT - - # Beautify path, fix probable problems with windows path slashes - get_filename_component( - HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE - ) - 
hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") - if(NOT HUNTER_ALLOW_SPACES_IN_PATH) - string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) - if(NOT _contain_spaces EQUAL -1) - hunter_gate_fatal_error( - "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." - "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" - "(Use at your own risk!)" - ERROR_PAGE "error.spaces.in.hunter.root" - ) - endif() - endif() - - string( - REGEX - MATCH - "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" - HUNTER_GATE_VERSION - "${HUNTER_GATE_URL}" - ) - string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) - if(_is_empty) - set(HUNTER_GATE_VERSION "unknown") - endif() - - hunter_gate_self( - "${HUNTER_GATE_ROOT}" - "${HUNTER_GATE_VERSION}" - "${HUNTER_GATE_SHA1}" - _hunter_self - ) - - set(_master_location "${_hunter_self}/cmake/Hunter") - if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") - # Hunter downloaded manually (e.g. by 'git clone') - set(_unused "xxxxxxxxxx") - set(HUNTER_GATE_SHA1 "${_unused}") - set(HUNTER_GATE_VERSION "${_unused}") - else() - get_filename_component(_archive_id_location "${_hunter_self}/.." 
ABSOLUTE) - set(_done_location "${_archive_id_location}/DONE") - set(_sha1_location "${_archive_id_location}/SHA1") - - # Check Hunter already downloaded by HunterGate - if(NOT EXISTS "${_done_location}") - hunter_gate_download("${_archive_id_location}") - endif() - - if(NOT EXISTS "${_done_location}") - hunter_gate_internal_error("hunter_gate_download failed") - endif() - - if(NOT EXISTS "${_sha1_location}") - hunter_gate_internal_error("${_sha1_location} not found") - endif() - file(READ "${_sha1_location}" _sha1_value) - string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) - if(NOT _is_equal) - hunter_gate_internal_error( - "Short SHA1 collision:" - " ${_sha1_value} (from ${_sha1_location})" - " ${HUNTER_GATE_SHA1} (HunterGate)" - ) - endif() - if(NOT EXISTS "${_master_location}") - hunter_gate_user_error( - "Master file not found:" - " ${_master_location}" - "try to update Hunter/HunterGate" - ) - endif() - endif() - include("${_master_location}") - set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) - endif() -endmacro() diff --git a/cmake/config.cmake.in b/cmake/config.cmake.in deleted file mode 100644 index 7064424..0000000 --- a/cmake/config.cmake.in +++ /dev/null @@ -1,16 +0,0 @@ -include(FindPackageHandleStandardArgs) -set(${CMAKE_FIND_PACKAGE_NAME}_CONFIG ${CMAKE_CURRENT_LIST_FILE}) -find_package_handle_standard_args(@PROJECT_NAME@ CONFIG_MODE) - -if(NOT TARGET Koinos::@KOINOS_LIB_TARGET_NAME@) - include("${CMAKE_CURRENT_LIST_DIR}/@KOINOS_LIB_TARGETS_EXPORT_NAME@.cmake") - if((NOT TARGET @KOINOS_LIB_TARGET_NAME@) AND - (NOT @PROJECT_NAME@_FIND_VERSION OR - @PROJECT_NAME@_FIND_VERSION VERSION_LESS 3.2.0)) - add_library(@KOINOS_LIB_TARGET_NAME@ INTERFACE IMPORTED GLOBAL) - set_target_properties(@KOINOS_LIB_TARGET_NAME@ PROPERTIES - INTERFACE_LINK_LIBRARIES Koinos::@KOINOS_LIB_TARGET_NAME@ - ) - endif() - add_library(Koinos::@KOINOS_LIB_TARGET_SHORT_NAME@ ALIAS @KOINOS_LIB_TARGET_NAME@) -endif() diff --git a/cmake/gcov_for_clang.sh 
b/cmake/gcov_for_clang.sh deleted file mode 100755 index 2788ba9..0000000 --- a/cmake/gcov_for_clang.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -which llvm-cov-11 > /dev/null -if [ $? -eq 0 ]; then - exec llvm-cov-11 gcov "$@" -else - exec llvm-cov gcov "$@" -fi diff --git a/cmake/koinos_state_dbConfigVersion.cmake.in b/cmake/koinos_state_dbConfigVersion.cmake.in deleted file mode 100644 index dc04e54..0000000 --- a/cmake/koinos_state_dbConfigVersion.cmake.in +++ /dev/null @@ -1,67 +0,0 @@ -# This is a basic version file for the Config-mode of find_package(). -# It is used by write_basic_package_version_file() as input file for configure_file() -# to create a version-file which can be installed along a config.cmake file. -# -# The created file sets PACKAGE_VERSION_EXACT if the current version string and -# the requested version string are exactly the same and it sets -# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, -# but only if the requested major version is the same as the current one. -# The variable CVF_VERSION must be set before calling configure_file(). 
- - -set(PACKAGE_VERSION "@CVF_VERSION@") - -if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) - set(PACKAGE_VERSION_COMPATIBLE FALSE) -else() - - if("@CVF_VERSION@" MATCHES "^([0-9]+)\\.") - set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") - else() - set(CVF_VERSION_MAJOR "@CVF_VERSION@") - endif() - - if(PACKAGE_FIND_VERSION_RANGE) - # both endpoints of the range must have the expected major version - math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") - if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR - OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) - OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) - set(PACKAGE_VERSION_COMPATIBLE FALSE) - elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR - AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) - OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) - set(PACKAGE_VERSION_COMPATIBLE TRUE) - else() - set(PACKAGE_VERSION_COMPATIBLE FALSE) - endif() - else() - if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) - set(PACKAGE_VERSION_COMPATIBLE TRUE) - else() - set(PACKAGE_VERSION_COMPATIBLE FALSE) - endif() - - if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) - set(PACKAGE_VERSION_EXACT TRUE) - endif() - endif() -endif() - - -# if the installed project requested no architecture check, don't perform the check -if("@CVF_ARCH_INDEPENDENT@") - return() -endif() - -# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: -if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@CMAKE_SIZEOF_VOID_P@" STREQUAL "") - return() -endif() - -# check that the installed version has the same 32/64bit-ness as the one which is currently searching: -if(NOT CMAKE_SIZEOF_VOID_P STREQUAL 
"@CMAKE_SIZEOF_VOID_P@") - math(EXPR installedBits "@CMAKE_SIZEOF_VOID_P@ * 8") - set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") - set(PACKAGE_VERSION_UNSUITABLE TRUE) -endif() diff --git a/cmake/pkg-config.pc.in b/cmake/pkg-config.pc.in deleted file mode 100644 index 56fc07d..0000000 --- a/cmake/pkg-config.pc.in +++ /dev/null @@ -1,4 +0,0 @@ -Name: ${PROJECT_NAME} -Description: Koinos State DB Library -Version: ${PROJECT_VERSION} -Cflags: -I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR} diff --git a/include/koinos/state_db/backends/backend.hpp b/include/koinos/state_db/backends/backend.hpp new file mode 100644 index 0000000..756d0d8 --- /dev/null +++ b/include/koinos/state_db/backends/backend.hpp @@ -0,0 +1,60 @@ +#pragma once + +#include + +#include +#include + +namespace koinos::state_db::backends { + +class abstract_backend +{ +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + using size_type = detail::size_type; + + abstract_backend(); + virtual ~abstract_backend(){}; + + virtual iterator begin() = 0; + virtual iterator end() = 0; + + virtual void put( const key_type& k, const value_type& v ) = 0; + virtual const value_type* get( const key_type& ) const = 0; + virtual void erase( const key_type& k ) = 0; + virtual void clear() = 0; + + virtual size_type size() const = 0; + bool empty() const; + + virtual iterator find( const key_type& k ) = 0; + virtual iterator lower_bound( const key_type& k ) = 0; + + size_type revision() const; + void set_revision( size_type ); + + const crypto::multihash& id() const; + void set_id( const crypto::multihash& ); + + const crypto::multihash& merkle_root() const; + void set_merkle_root( const crypto::multihash& ); + + const protocol::block_header& block_header() const; + void set_block_header( const protocol::block_header& ); + + virtual void start_write_batch() = 0; + virtual void end_write_batch() = 0; + + virtual void store_metadata() = 0; + + virtual 
std::shared_ptr< abstract_backend > clone() const = 0; + +private: + size_type _revision = 0; + crypto::multihash _id; + crypto::multihash _merkle_root; + protocol::block_header _header; +}; + +} // namespace koinos::state_db::backends diff --git a/libraries/state_db/include/koinos/state_db/backends/exceptions.hpp b/include/koinos/state_db/backends/exceptions.hpp similarity index 88% rename from libraries/state_db/include/koinos/state_db/backends/exceptions.hpp rename to include/koinos/state_db/backends/exceptions.hpp index 2cc90a7..5181928 100644 --- a/libraries/state_db/include/koinos/state_db/backends/exceptions.hpp +++ b/include/koinos/state_db/backends/exceptions.hpp @@ -7,4 +7,4 @@ KOINOS_DECLARE_DERIVED_EXCEPTION( backend_exception, state_db_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( iterator_exception, state_db_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( internal_exception, state_db_exception ); -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/include/koinos/state_db/backends/iterator.hpp b/include/koinos/state_db/backends/iterator.hpp new file mode 100644 index 0000000..6a33ab6 --- /dev/null +++ b/include/koinos/state_db/backends/iterator.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include + +namespace koinos::state_db::backends { + +class iterator; + +class abstract_iterator +{ +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + + virtual ~abstract_iterator(){}; + + virtual const value_type& operator*() const = 0; + + virtual const key_type& key() const = 0; + + virtual abstract_iterator& operator++() = 0; + virtual abstract_iterator& operator--() = 0; + +private: + friend class iterator; + + virtual bool valid() const = 0; + virtual std::unique_ptr< abstract_iterator > copy() const = 0; +}; + +class iterator final +{ +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + + iterator( std::unique_ptr< abstract_iterator > ); + 
iterator( const iterator& other ); + iterator( iterator&& other ); + + const value_type& operator*() const; + + const key_type& key() const; + const value_type& value() const; + + iterator& operator++(); + iterator& operator--(); + + iterator& operator=( iterator&& other ); + + friend bool operator==( const iterator& x, const iterator& y ); + friend bool operator!=( const iterator& x, const iterator& y ); + +private: + bool valid() const; + + std::unique_ptr< abstract_iterator > _itr; +}; + +} // namespace koinos::state_db::backends diff --git a/include/koinos/state_db/backends/map/map_backend.hpp b/include/koinos/state_db/backends/map/map_backend.hpp new file mode 100644 index 0000000..5c2454e --- /dev/null +++ b/include/koinos/state_db/backends/map/map_backend.hpp @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +namespace koinos::state_db::backends::map { + +class map_backend final: public abstract_backend +{ +public: + using key_type = abstract_backend::key_type; + using value_type = abstract_backend::value_type; + using size_type = abstract_backend::size_type; + + map_backend(); + virtual ~map_backend() override; + + // Iterators + virtual iterator begin() noexcept override; + virtual iterator end() noexcept override; + + // Modifiers + virtual void put( const key_type& k, const value_type& v ) override; + virtual const value_type* get( const key_type& ) const override; + virtual void erase( const key_type& k ) override; + virtual void clear() noexcept override; + + virtual size_type size() const noexcept override; + + // Lookup + virtual iterator find( const key_type& k ) override; + virtual iterator lower_bound( const key_type& k ) override; + + virtual void start_write_batch() override; + virtual void end_write_batch() override; + + virtual void store_metadata() override; + + virtual std::shared_ptr< abstract_backend > clone() const override; + +private: + std::map< key_type, value_type > _map; + protocol::block_header _header; +}; + +} // namespace 
koinos::state_db::backends::map diff --git a/include/koinos/state_db/backends/map/map_iterator.hpp b/include/koinos/state_db/backends/map/map_iterator.hpp new file mode 100644 index 0000000..83f606e --- /dev/null +++ b/include/koinos/state_db/backends/map/map_iterator.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include + +#include + +namespace koinos::state_db::backends::map { + +class map_backend; + +class map_iterator final: public abstract_iterator +{ +public: + using value_type = abstract_iterator::value_type; + using map_impl = std::map< detail::key_type, detail::value_type >; + using iterator_impl = map_impl::iterator; + + map_iterator( std::unique_ptr< iterator_impl > itr, const map_impl& map ); + ~map_iterator(); + + virtual const value_type& operator*() const override; + + virtual const key_type& key() const override; + + virtual abstract_iterator& operator++() override; + virtual abstract_iterator& operator--() override; + +private: + virtual bool valid() const override; + virtual std::unique_ptr< abstract_iterator > copy() const override; + + std::unique_ptr< iterator_impl > _itr; + const map_impl& _map; +}; + +} // namespace koinos::state_db::backends::map diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/exceptions.hpp b/include/koinos/state_db/backends/rocksdb/exceptions.hpp similarity index 94% rename from libraries/state_db/include/koinos/state_db/backends/rocksdb/exceptions.hpp rename to include/koinos/state_db/backends/rocksdb/exceptions.hpp index 5205705..a226c68 100644 --- a/libraries/state_db/include/koinos/state_db/backends/rocksdb/exceptions.hpp +++ b/include/koinos/state_db/backends/rocksdb/exceptions.hpp @@ -13,4 +13,4 @@ KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_write_exception, rocksdb_backend_excep KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_session_in_progress, rocksdb_backend_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_internal_exception, rocksdb_backend_exception ); -} // koinos::state_db::backends::rocksdb 
+} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/object_cache.hpp b/include/koinos/state_db/backends/rocksdb/object_cache.hpp new file mode 100644 index 0000000..dc065b4 --- /dev/null +++ b/include/koinos/state_db/backends/rocksdb/object_cache.hpp @@ -0,0 +1,46 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include + +namespace koinos::state_db::backends::rocksdb { + +class object_cache +{ +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + +private: + using lru_list_type = std::list< key_type >; + using value_map_type = + std::map< key_type, std::pair< std::shared_ptr< const value_type >, typename lru_list_type::iterator > >; + + lru_list_type _lru_list; + value_map_type _object_map; + std::size_t _cache_size = 0; + const std::size_t _cache_max_size; + std::mutex _mutex; + +public: + object_cache( std::size_t size ); + ~object_cache(); + + std::pair< bool, std::shared_ptr< const value_type > > get( const key_type& k ); + std::shared_ptr< const value_type > put( const key_type& k, std::shared_ptr< const value_type > v ); + + void remove( const key_type& k ); + + void clear(); + + std::mutex& get_mutex(); +}; + +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp new file mode 100644 index 0000000..6dc6500 --- /dev/null +++ b/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace koinos::state_db::backends::rocksdb { + +class rocksdb_backend final: public abstract_backend +{ +public: + using key_type = abstract_backend::key_type; + using value_type = abstract_backend::value_type; + using size_type = abstract_backend::size_type; + + rocksdb_backend(); + 
~rocksdb_backend(); + + void open( const std::filesystem::path& p ); + void close(); + void flush(); + + virtual void start_write_batch() override; + virtual void end_write_batch() override; + + // Iterators + virtual iterator begin() override; + virtual iterator end() override; + + // Modifiers + virtual void put( const key_type& k, const value_type& v ) override; + virtual const value_type* get( const key_type& ) const override; + virtual void erase( const key_type& k ) override; + virtual void clear() override; + + virtual size_type size() const override; + + // Lookup + virtual iterator find( const key_type& k ) override; + virtual iterator lower_bound( const key_type& k ) override; + + virtual void store_metadata() override; + + virtual std::shared_ptr< abstract_backend > clone() const override; + +private: + void load_metadata(); + + using column_handles = std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > >; + + std::shared_ptr< ::rocksdb::DB > _db; + std::optional< ::rocksdb::WriteBatch > _write_batch; + column_handles _handles; + ::rocksdb::WriteOptions _wopts; + std::shared_ptr< ::rocksdb::ReadOptions > _ropts; + mutable std::shared_ptr< object_cache > _cache; + size_type _size = 0; +}; + +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp new file mode 100644 index 0000000..194654a --- /dev/null +++ b/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp @@ -0,0 +1,50 @@ +#pragma once + +#include +#include + +#include + +#include + +namespace koinos::state_db::backends::rocksdb { + +class rocksdb_backend; + +class rocksdb_iterator final: public abstract_iterator +{ +public: + using value_type = abstract_iterator::value_type; + + rocksdb_iterator( std::shared_ptr< ::rocksdb::DB > db, + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, + std::shared_ptr< const ::rocksdb::ReadOptions > opts, + 
std::shared_ptr< object_cache > cache ); + rocksdb_iterator( const rocksdb_iterator& other ); + virtual ~rocksdb_iterator() override; + + virtual const value_type& operator*() const override; + + virtual const key_type& key() const override; + + virtual abstract_iterator& operator++() override; + virtual abstract_iterator& operator--() override; + +private: + friend class rocksdb_backend; + + virtual bool valid() const override; + virtual std::unique_ptr< abstract_iterator > copy() const override; + + void update_cache_value() const; + + std::shared_ptr< ::rocksdb::DB > _db; + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > _handle; + std::unique_ptr< ::rocksdb::Iterator > _iter; + std::shared_ptr< const ::rocksdb::ReadOptions > _opts; + mutable std::shared_ptr< object_cache > _cache; + mutable std::shared_ptr< const value_type > _cache_value; + mutable std::shared_ptr< const key_type > _key; +}; + +} // namespace koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/include/koinos/state_db/backends/types.hpp b/include/koinos/state_db/backends/types.hpp similarity index 71% rename from libraries/state_db/include/koinos/state_db/backends/types.hpp rename to include/koinos/state_db/backends/types.hpp index e2c932b..2591706 100644 --- a/libraries/state_db/include/koinos/state_db/backends/types.hpp +++ b/include/koinos/state_db/backends/types.hpp @@ -1,5 +1,6 @@ #pragma once +#include #include namespace koinos::state_db::backends::detail { @@ -8,4 +9,4 @@ using key_type = std::string; using value_type = std::string; using size_type = uint64_t; -} // koinos::state_db::backends::detail +} // namespace koinos::state_db::backends::detail diff --git a/include/koinos/state_db/state_db.hpp b/include/koinos/state_db/state_db.hpp new file mode 100644 index 0000000..2bef39b --- /dev/null +++ b/include/koinos/state_db/state_db.hpp @@ -0,0 +1,487 @@ + +#pragma once +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + 
+namespace koinos::state_db { + +namespace detail { + +class database_impl; +class state_node_impl; +class anonymous_state_node_impl; + +} // namespace detail + +class abstract_state_node; +class anonymous_state_node; + +using abstract_state_node_ptr = std::shared_ptr< abstract_state_node >; +using anonymous_state_node_ptr = std::shared_ptr< anonymous_state_node >; + +enum class fork_resolution_algorithm +{ + fifo, + block_time, + pob +}; + +/** + * Allows querying the database at a particular checkpoint. + */ +class abstract_state_node +{ +public: + abstract_state_node(); + virtual ~abstract_state_node(); + + /** + * Fetch an object if one exists. + * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - args.key is copied into result.key + */ + const object_value* get_object( const object_space& space, const object_key& key ) const; + + /** + * Get the next object. + * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - Found key is written into result + */ + std::pair< const object_value*, const object_key > get_next_object( const object_space& space, + const object_key& key ) const; + + /** + * Get the previous object. + * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - Found key is written into result + */ + std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, + const object_key& key ) const; + + /** + * Write an object into the state_node. 
+ * + * - Fail if node is not writable. + * - If object exists, object is overwritten. + */ + int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); + + /** + * Remove an object from the state_node + */ + int64_t remove_object( const object_space& space, const object_key& key ); + + /** + * Return true if the node is writable. + */ + bool is_finalized() const; + + /** + * Return the merkle root of writes on this state node + */ + crypto::multihash merkle_root() const; + + /** + * Returns the state delta entries associated with this state node + */ + std::vector< protocol::state_delta_entry > get_delta_entries() const; + + /** + * Returns an anonymous state node with this node as its parent. + */ + anonymous_state_node_ptr create_anonymous_node(); + + virtual const state_node_id& id() const = 0; + virtual const state_node_id& parent_id() const = 0; + virtual uint64_t revision() const = 0; + virtual abstract_state_node_ptr parent() const = 0; + virtual const protocol::block_header& block_header() const = 0; + + friend class detail::database_impl; + +protected: + virtual std::shared_ptr< abstract_state_node > shared_from_derived() = 0; + + std::unique_ptr< detail::state_node_impl > _impl; +}; + +class anonymous_state_node final: public abstract_state_node, + public std::enable_shared_from_this< anonymous_state_node > +{ +public: + anonymous_state_node(); + ~anonymous_state_node(); + + const state_node_id& id() const override; + const state_node_id& parent_id() const override; + uint64_t revision() const override; + abstract_state_node_ptr parent() const override; + const protocol::block_header& block_header() const override; + + void commit(); + void reset(); + + friend class abstract_state_node; + +protected: + std::shared_ptr< abstract_state_node > shared_from_derived() override; + +private: + abstract_state_node_ptr _parent; +}; + +/** + * Allows querying the database at a particular checkpoint. 
+ */ +class state_node final: public abstract_state_node, + public std::enable_shared_from_this< state_node > +{ +public: + state_node(); + ~state_node(); + + const state_node_id& id() const override; + const state_node_id& parent_id() const override; + uint64_t revision() const override; + abstract_state_node_ptr parent() const override; + const protocol::block_header& block_header() const override; + +protected: + std::shared_ptr< abstract_state_node > shared_from_derived() override; +}; + +using state_node_ptr = std::shared_ptr< state_node >; +using genesis_init_function = std::function< void( state_node_ptr ) >; +using fork_list = std::vector< state_node_ptr >; +using state_node_comparator_function = std::function< state_node_ptr( fork_list&, state_node_ptr, state_node_ptr ) >; +using shared_lock_ptr = std::shared_ptr< const std::shared_lock< std::shared_mutex > >; +using unique_lock_ptr = std::shared_ptr< const std::unique_lock< std::shared_mutex > >; + +state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); +state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); +state_node_ptr pob_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); + +/** + * database is designed to provide parallel access to the database across + * different states. + * + * It does by tracking positive state deltas, which can be merged on the fly + * at read time to return the correct state of the database. A database + * checkpoint is represented by the state_node class. Reads and writes happen + * against a state_node. + * + * States are organized as a tree with the assumption that one path wins out + * over time and cousin paths are discarded as the root is advanced. + * + * Currently, database is not thread safe. That is, calls directly on database + * are not thread safe. (i.e. 
deleting a node concurrently to creating a new + * node can leave database in an undefined state) + * + * Conccurrency across state nodes is supported native to the implementation + * without locks. Writes on a single state node need to be serialized, but + * reads are implicitly parallel. + * + * TODO: Either extend the design of database to support concurrent access + * or implement a some locking mechanism for access to the fork multi + * index container. + * + * There is an additional corner case that is difficult to address. + * + * Upon squashing a state node, readers may be reading from the node that + * is being squashed or an intermediate node between root and that node. + * Relatively speaking, this should happen infrequently (on the order of once + * per some number of seconds). As such, whatever guarantees concurrency + * should heavily favor readers. Writing can happen lazily, preferably when + * there is no contention from readers at all. + */ +class database final +{ +public: + database(); + ~database(); + + shared_lock_ptr get_shared_lock() const; + + unique_lock_ptr get_unique_lock() const; + + /** + * Open the database. + */ + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ); + + /** + * Open the database. + */ + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ); + + /** + * Close the database. + */ + void close( const unique_lock_ptr& lock ); + + /** + * Reset the database. 
+ */ + void reset( const unique_lock_ptr& lock ); + + /** + * Get an ancestor of a node at a particular revision + */ + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const; + state_node_ptr get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const; + + /** + * Get an ancestor of a node at a particular revision + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const; + state_node_ptr get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const; + + /** + * Get the state_node for the given state_node_id. + * + * Return an empty pointer if no node for the given id exists. + */ + state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; + + /** + * Get the state_node for the given state_node_id. + * + * Return an empty pointer if no node for the given id exists. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; + + /** + * Create a writable state_node. + * + * - If parent_id refers to a writable node, fail. + * - Otherwise, return a new writable node. + * - Writing to the returned node will not modify the parent node. + * + * If the parent is subsequently discarded, database preserves + * as much of the parent's state storage as necessary to continue + * to serve queries on any (non-discarded) children. 
A discarded + * parent node's state may internally be merged into a child's + * state storage area, allowing the parent's state storage area + * to be freed. This merge may occur immediately, or it may be + * deferred or parallelized. + */ + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + + /** + * Create a writable state_node. + * + * - If parent_id refers to a writable node, fail. + * - Otherwise, return a new writable node. + * - Writing to the returned node will not modify the parent node. + * + * If the parent is subsequently discarded, database preserves + * as much of the parent's state storage as necessary to continue + * to serve queries on any (non-discarded) children. A discarded + * parent node's state may internally be merged into a child's + * state storage area, allowing the parent's state storage area + * to be freed. This merge may occur immediately, or it may be + * deferred or parallelized. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + + /** + * Clone a node with a new id and block header. + * + * Cannot clone a finalized node. + */ + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + + /** + * Clone a node with a new id and block header. + * + * Cannot clone a finalized node. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. 
Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + + /** + * Finalize a node. The node will no longer be writable. + */ + void finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ); + + /** + * Finalize a node. The node will no longer be writable. + */ + void finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Discard the node, it can no longer be used. + * + * If the node has any children, they too will be deleted because + * there will no longer exist a path from root to those nodes. + * + * This will fail if the node you are deleting would cause the + * current head node to be delted. + */ + void discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ); + + /** + * Discard the node, it can no longer be used. + * + * If the node has any children, they too will be deleted because + * there will no longer exist a path from root to those nodes. + * + * This will fail if the node you are deleting would cause the + * current head node to be delted. + */ + void discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Squash the node in to the root state, committing it. + * Branching state between this node and its ancestor will be discarded + * and no longer accesible. + * + * It is the responsiblity of the caller to ensure no readers or writers + * are accessing affected nodes by this call. + * + * TODO: Implement thread safety within commit node to make + * database thread safe for all callers. + */ + void commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Get and return the current "head" node. + * + * Head is determined by longest chain. Oldest + * chain wins in a tie of length. 
Only finalized + * nodes are eligible to become head. + */ + state_node_ptr get_head( const shared_lock_ptr& lock ) const; + + /** + * Get and return the current "head" node. + * + * Head is determined by longest chain. Oldest + * chain wins in a tie of length. Only finalized + * nodes are eligible to become head. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr get_head( const unique_lock_ptr& lock ) const; + + /** + * Get and return a vector of all fork heads. + * + * Fork heads are any finalized nodes that do + * not have finalized children. + */ + std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; + + /** + * Get and return a vector of all fork heads. + * + * Fork heads are any finalized nodes that do + * not have finalized children. + * + * WARNING: The state nodes returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; + + /** + * Get and return a vector of all nodes. + */ + std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; + + /** + * Get and return a vector of all nodes. + * + * WARNING: The state nodes returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; + + /** + * Get and return the current "root" node. 
+ * + * All state nodes are guaranteed to a descendant of root. + */ + state_node_ptr get_root( const shared_lock_ptr& lock ) const; + + /** + * Get and return the current "root" node. + * + * All state nodes are guaranteed to a descendant of root. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr get_root( const unique_lock_ptr& lock ) const; + +private: + std::unique_ptr< detail::database_impl > impl; +}; + +} // namespace koinos::state_db diff --git a/libraries/state_db/include/koinos/state_db/state_db_types.hpp b/include/koinos/state_db/state_db_types.hpp similarity index 97% rename from libraries/state_db/include/koinos/state_db/state_db_types.hpp rename to include/koinos/state_db/state_db_types.hpp index 3911c03..57373a3 100644 --- a/libraries/state_db/include/koinos/state_db/state_db_types.hpp +++ b/include/koinos/state_db/state_db_types.hpp @@ -3,9 +3,9 @@ #include #include -#include #include #include +#include namespace koinos::state_db { @@ -49,4 +49,4 @@ KOINOS_DECLARE_DERIVED_EXCEPTION( cannot_discard, state_db_exception ); */ KOINOS_DECLARE_DERIVED_EXCEPTION( internal_error, state_db_exception ); -} // koinos::state_db +} // namespace koinos::state_db diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt deleted file mode 100644 index 038e992..0000000 --- a/libraries/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(state_db) diff --git a/libraries/state_db/CMakeLists.txt b/libraries/state_db/CMakeLists.txt deleted file mode 100644 index 94d87f3..0000000 --- a/libraries/state_db/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -file(GLOB HEADERS - "include/koinos/state_db/*.hpp" - "include/koinos/state_db/detail/*.hpp" - "include/koinos/state_db/backends/*.hpp" - "include/koinos/state_db/backends/map/*.hpp" - 
"include/koinos/state_db/backends/rocksdb/*.hpp") -add_library(koinos_state_db - state_db.cpp - detail/state_delta.cpp - detail/merge_iterator.cpp - backends/backend.cpp - backends/iterator.cpp - backends/map/map_backend.cpp - backends/map/map_iterator.cpp - backends/rocksdb/rocksdb_backend.cpp - backends/rocksdb/rocksdb_iterator.cpp - backends/rocksdb/object_cache.cpp - ${HEADERS} ) - -target_link_libraries(koinos_state_db Koinos::exception Koinos::proto Koinos::crypto RocksDB::rocksdb) -target_include_directories(koinos_state_db PUBLIC - $ - $ -) - -add_library(Koinos::state_db ALIAS koinos_state_db) - -install(FILES ${HEADERS} DESTINATION "include/koinos/state_db") - -export( - TARGETS ${KOINOS_LIB_TARGET_NAME} - NAMESPACE Koinos:: - FILE ${KOINOS_LIB_CMAKE_PROJECT_TARGETS_FILE} -) - -install( - TARGETS ${KOINOS_LIB_TARGET_NAME} - EXPORT ${KOINOS_LIB_TARGETS_EXPORT_NAME} - INCLUDES DESTINATION ${KOINOS_LIB_INCLUDE_INSTALL_DIR} - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) - -install( - EXPORT ${KOINOS_LIB_TARGETS_EXPORT_NAME} - NAMESPACE Koinos:: - DESTINATION ${KOINOS_LIB_CONFIG_INSTALL_DIR} -) diff --git a/libraries/state_db/backends/iterator.cpp b/libraries/state_db/backends/iterator.cpp deleted file mode 100644 index 68a195f..0000000 --- a/libraries/state_db/backends/iterator.cpp +++ /dev/null @@ -1,60 +0,0 @@ -#include -#include - -namespace koinos::state_db::backends { - -iterator::iterator( std::unique_ptr< abstract_iterator > itr ) : _itr( std::move( itr ) ) {} - -iterator::iterator( const iterator& other ) : _itr( other._itr->copy() ) {} - -iterator::iterator( iterator&& other ) : _itr( std::move( other._itr ) ) {} - -const iterator::value_type& iterator::operator*() const -{ - return **_itr; -} - -const iterator::key_type& iterator::key() const -{ - return _itr->key(); -} - -iterator& iterator::operator++() -{ - ++(*_itr); - return *this; -} - -iterator& iterator::operator--() -{ - --(*_itr); - return *this; -} - 
-iterator& iterator::operator=( iterator&& other ) -{ - _itr = std::move( other._itr ); - return *this; -} - -bool iterator::valid() const -{ - return _itr && _itr->valid(); -} - -bool operator==( const iterator& x, const iterator& y ) -{ - if ( x.valid() && y.valid() ) - { - return *x == *y; - } - - return x.valid() == y.valid(); -} - -bool operator!=( const iterator& x, const iterator& y ) -{ - return !( x == y ); -} - -} // koinos::state_db::backends diff --git a/libraries/state_db/backends/map/map_backend.cpp b/libraries/state_db/backends/map/map_backend.cpp deleted file mode 100644 index 32b0dab..0000000 --- a/libraries/state_db/backends/map/map_backend.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include - -namespace koinos::state_db::backends::map { - -map_backend::map_backend() {} - -map_backend::~map_backend() {} - -iterator map_backend::begin() noexcept -{ - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.begin() ), _map ) ); -} - -iterator map_backend::end() noexcept -{ - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.end() ), _map ) ); -} - -void map_backend::put( const key_type& k, const value_type& v ) -{ - _map.insert_or_assign( k, v ); -} - -const map_backend::value_type* map_backend::get( const key_type& key ) const -{ - auto itr = _map.find( key ); - if ( itr == _map.end() ) - { - return nullptr; - } - - return &itr->second; -} - -void map_backend::erase( const key_type& k ) -{ - _map.erase( k ); -} - -void map_backend::clear() noexcept -{ - _map.clear(); -} - -map_backend::size_type map_backend::size() const noexcept -{ - return _map.size(); -} - -iterator map_backend::find( const key_type& k ) -{ - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.find( k ) ), _map ) ); -} - -iterator map_backend::lower_bound( const key_type& k ) -{ - return iterator( std::make_unique< map_iterator >( 
std::make_unique< map_iterator::iterator_impl >( _map.lower_bound( k ) ), _map ) ); -} - -void map_backend::start_write_batch() {} - -void map_backend::end_write_batch() {} - -void map_backend::store_metadata() {} - -std::shared_ptr< abstract_backend > map_backend::clone() const -{ - return std::make_shared< map_backend >( *this ); -} - -} // koinos::state_db::backends::map diff --git a/libraries/state_db/backends/map/map_iterator.cpp b/libraries/state_db/backends/map/map_iterator.cpp deleted file mode 100644 index 6d37e73..0000000 --- a/libraries/state_db/backends/map/map_iterator.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include - -#include - -namespace koinos::state_db::backends::map { - -map_iterator::map_iterator( std::unique_ptr< std::map< detail::key_type, detail::value_type >::iterator > itr, - const std::map< detail::key_type, detail::value_type >& map ) : - _itr( std::move( itr ) ), - _map( map ) - {} - -map_iterator::~map_iterator() {} - -const map_iterator::value_type& map_iterator::operator*() const -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - return (*_itr)->second; -} - -const map_iterator::key_type& map_iterator::key() const -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - return (*_itr)->first; -} - -abstract_iterator& map_iterator::operator++() -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - ++(*_itr); - return *this; -} - -abstract_iterator& map_iterator::operator--() -{ - KOINOS_ASSERT( *_itr != _map.begin(), iterator_exception, "iterator operation is invalid" ); - --(*_itr); - return *this; -} - -bool map_iterator::valid() const -{ - return _itr && *_itr != _map.end(); -} - -std::unique_ptr< abstract_iterator > map_iterator::copy() const -{ - return std::make_unique< map_iterator >( std::make_unique< std::map< detail::key_type, detail::value_type >::iterator >( *_itr ), _map ); -} - -} // koinos::state_db::backends::map diff --git 
a/libraries/state_db/backends/rocksdb/object_cache.cpp b/libraries/state_db/backends/rocksdb/object_cache.cpp deleted file mode 100644 index 9bde358..0000000 --- a/libraries/state_db/backends/rocksdb/object_cache.cpp +++ /dev/null @@ -1,73 +0,0 @@ -#include - -#include - -namespace koinos::state_db::backends::rocksdb { - -object_cache::object_cache( std::size_t size ) : _cache_max_size( size ) {} - -object_cache::~object_cache() {} - -std::pair< bool, std::shared_ptr< const object_cache::value_type > > object_cache::get( const key_type& k ) -{ - auto itr = _object_map.find( k ); - if ( itr == _object_map.end() ) - return std::make_pair( false, std::shared_ptr< const object_cache::value_type >() ); - - // Erase the entry from the list and push front - _lru_list.erase( itr->second.second ); - _lru_list.push_front( k ); - auto val = itr->second.first; - - _object_map[ k ] = std::make_pair( val, _lru_list.begin() ); - - assert( _object_map.size() == _lru_list.size() ); - - return std::make_pair( true, val ); -} - -std::shared_ptr< const object_cache::value_type > object_cache::put( const key_type& k, std::shared_ptr< const object_cache::value_type > v ) -{ - remove( k ); - - // Min 1 byte for key and 1 byte for value - auto entry_size = std::max( k.size() + ( v ? v->size() : 0 ), std::size_t( 2 ) ); - - // If the cache is full, remove the last entry from the map and pop back - while ( _cache_size + entry_size > _cache_max_size ) - remove( _lru_list.back() ); - - _lru_list.push_front( k ); - _object_map[ k ] = std::make_pair( v, _lru_list.begin() ); - _cache_size += entry_size; - - assert( _object_map.size() == _lru_list.size() ); - - return v; -} - -void object_cache::remove( const key_type& k ) -{ - auto itr = _object_map.find( k ); - if ( itr != _object_map.end() ) - { - _cache_size -= std::max( k.size() + ( itr->second.first ? 
itr->second.first->size() : 0 ), std::size_t( 2 ) ); - _lru_list.erase( itr->second.second ); - _object_map.erase( itr ); - } - - assert( _object_map.size() == _lru_list.size() ); -} - -void object_cache::clear() -{ - _object_map.clear(); - _lru_list.clear(); -} - -std::mutex& object_cache::get_mutex() -{ - return _mutex; -} - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/backends/rocksdb/rocksdb_backend.cpp b/libraries/state_db/backends/rocksdb/rocksdb_backend.cpp deleted file mode 100644 index dd81d52..0000000 --- a/libraries/state_db/backends/rocksdb/rocksdb_backend.cpp +++ /dev/null @@ -1,515 +0,0 @@ -#include - -#include -#include -#include -#include - -#include -#include - -namespace koinos::state_db::backends::rocksdb { - -namespace constants { - constexpr std::size_t cache_size = 64 << 20; // 64 MB - constexpr std::size_t max_open_files = 64; - - constexpr std::size_t default_column_index = 0; - const std::string objects_column_name = "objects"; - constexpr std::size_t objects_column_index = 1; - const std::string metadata_column_name = "metadata"; - constexpr std::size_t metadata_column_index = 2; - - const std::string size_key = "size"; - const std::string revision_key = "revision"; - const std::string id_key = "id"; - const std::string merkle_root_key = "merkle_root"; - const std::string block_header_key = "block_header"; - - constexpr rocksdb_backend::size_type size_default = 0; - constexpr rocksdb_backend::size_type revision_default = 0; - const crypto::multihash id_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - const crypto::multihash merkle_root_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - const protocol::block_header block_header_default = protocol::block_header(); -} // constants - -bool setup_database( const std::filesystem::path& p ) -{ - std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; - defs.emplace_back( - constants::objects_column_name, - 
::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::metadata_column_name, - ::rocksdb::ColumnFamilyOptions() ); - - ::rocksdb::Options options; - options.create_if_missing = true; - - ::rocksdb::DB* db; - auto status = ::rocksdb::DB::Open( options, p.string(), &db ); - - KOINOS_ASSERT( status.ok(), rocksdb_open_exception, "unable to open rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - auto db_ptr = std::shared_ptr< ::rocksdb::DB >( db ); - - std::vector< ::rocksdb::ColumnFamilyHandle* > handles; - status = db->CreateColumnFamilies( defs, &handles ); - - if ( !status.ok() ) - { - return false; - } - - std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > > handle_ptrs; - - for ( auto* h : handles ) - handle_ptrs.emplace_back( h ); - - ::rocksdb::WriteOptions wopts; - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::size_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::size_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::revision_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::revision_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::id_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::id_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::merkle_root_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::merkle_root_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = 
db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::block_header_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::block_header_default ) ) - ); - - handle_ptrs.clear(); - db_ptr.reset(); - - return status.ok(); -} - -rocksdb_backend::rocksdb_backend() : - _cache( std::make_shared< object_cache >( constants::cache_size ) ), - _ropts( std::make_shared< ::rocksdb::ReadOptions >() ) -{} - -rocksdb_backend::~rocksdb_backend() -{ - close(); -} - -void rocksdb_backend::open( const std::filesystem::path& p ) -{ - KOINOS_ASSERT( p.is_absolute(), rocksdb_open_exception, "path must be absolute, ${p}", ("p", p.string()) ); - KOINOS_ASSERT( std::filesystem::exists( p ), rocksdb_open_exception, "path does not exist, ${p}", ("p", p.string()) ); - - std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; - defs.emplace_back( - ::rocksdb::kDefaultColumnFamilyName, - ::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::objects_column_name, - ::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::metadata_column_name, - ::rocksdb::ColumnFamilyOptions() ); - - std::vector< ::rocksdb::ColumnFamilyHandle* > handles; - - ::rocksdb::Options options; - options.max_open_files = constants::max_open_files; - ::rocksdb::DB* db; - - auto status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); - - if ( !status.ok() ) - { - KOINOS_ASSERT( setup_database( p ), rocksdb_setup_exception, "unable to configure rocksdb database" ); - - status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); - KOINOS_ASSERT( status.ok(), rocksdb_open_exception, "unable to open rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - } - - _db = std::shared_ptr< ::rocksdb::DB >( db ); - - for ( auto* h : handles ) - _handles.emplace_back( h ); - - try - { - load_metadata(); - } - catch ( ... 
) - { - _handles.clear(); - _db.reset(); - throw; - } -} - -void rocksdb_backend::close() -{ - if ( _db ) - { - store_metadata(); - flush(); - - ::rocksdb::CancelAllBackgroundWork( &*_db, true ); - _handles.clear(); - _db.reset(); - std::lock_guard lock( _cache->get_mutex() ); - _cache->clear(); - } -} - -void rocksdb_backend::flush() -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - static const ::rocksdb::FlushOptions flush_options; - - _db->Flush( flush_options, &*_handles[ constants::objects_column_index ] ); - _db->Flush( flush_options, &*_handles[ constants::metadata_column_index ] ); -} - -void rocksdb_backend::start_write_batch() -{ - KOINOS_ASSERT( !_write_batch, rocksdb_session_in_progress, "session already in progress" ); - _write_batch.emplace(); -} - -void rocksdb_backend::end_write_batch() -{ - if ( _write_batch ) - { - auto status = _db->Write( _wopts, &*_write_batch ); - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write session to rocksdb database" + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - _write_batch.reset(); - } -} - -iterator rocksdb_backend::begin() -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - itr->_iter->SeekToFirst(); - - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); -} - -iterator rocksdb_backend::end() -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); -} - -void rocksdb_backend::put( const key_type& k, const value_type& v ) -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - bool exists = get( k ); - - ::rocksdb::Status status; - - if ( _write_batch ) - { - status = _write_batch->Put( - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - ::rocksdb::Slice( v ) ); - } - else - { - status = _db->Put( - _wopts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - ::rocksdb::Slice( v ) ); - } - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - - if ( !exists ) - { - _size++; - } - - std::lock_guard lock( _cache->get_mutex() ); - _cache->put( k, std::make_shared< const object_cache::value_type >( v ) ); -} - -const rocksdb_backend::value_type* rocksdb_backend::get( const key_type& k ) const -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - std::lock_guard lock( _cache->get_mutex() ); - auto [cache_hit, ptr] = _cache->get( k ); - if ( cache_hit ) - { - if ( ptr ) - return &*ptr; - - return nullptr; - } - - - value_type value; - auto status = _db->Get( - *_ropts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - &value - ); - - if ( status.ok() ) - return &*_cache->put( k, std::make_shared< const object_cache::value_type >( value ) ); - else if ( status.IsNotFound() ) - _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); - - return nullptr; -} - -void rocksdb_backend::erase( const key_type& k ) -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - bool exists = get( k ); - auto status = _db->Delete( - _wopts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ) ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - - if ( exists ) - { - _size--; - } - - std::lock_guard lock( _cache->get_mutex() ); - _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); -} - -void rocksdb_backend::clear() -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - for ( auto h : _handles ) - { - _db->DropColumnFamily( &*h ); - } - - _handles.clear(); - _db.reset(); - std::lock_guard lock( _cache->get_mutex() ); - _cache->clear(); -} - -rocksdb_backend::size_type rocksdb_backend::size() const -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - return _size; -} - -iterator rocksdb_backend::find( const key_type& k ) -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - auto itr_ptr = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - - itr_ptr->Seek( ::rocksdb::Slice( k ) ); - - if ( itr_ptr->Valid() ) - { - auto key_slice = itr_ptr->key(); - - if ( k.size() == key_slice.size() - && memcmp( k.data(), key_slice.data(), k.size() ) == 0 ) - { - itr->_iter = std::move( itr_ptr ); - } - } - - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); -} - -iterator rocksdb_backend::lower_bound( const key_type& k ) -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - - itr->_iter->Seek( ::rocksdb::Slice( k ) ); - - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); -} - -void rocksdb_backend::load_metadata() -{ - 
KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - std::string value; - auto status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::size_key ), - &value ); - - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - _size = util::converter::to< size_type >( value ); - - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::revision_key ), - &value ); - - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - set_revision( util::converter::to< size_type >( value ) ); - - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::id_key ), - &value ); - - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - set_id( util::converter::to< crypto::multihash >( value ) ); - - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::merkle_root_key ), - &value ); - - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - set_merkle_root( util::converter::to< crypto::multihash >( value ) ); - - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::block_header_key ), - &value ); - - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - - set_block_header( util::converter::to< protocol::block_header >( value ) ); -} - -void rocksdb_backend::store_metadata() -{ - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::size_key ), - ::rocksdb::Slice( util::converter::as< std::string >( _size ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::revision_key ), - ::rocksdb::Slice( util::converter::as< std::string >( revision() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::id_key ), - ::rocksdb::Slice( util::converter::as< std::string >( id() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::merkle_root_key ), - ::rocksdb::Slice( util::converter::as< std::string >( merkle_root() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::block_header_key ), - ::rocksdb::Slice( util::converter::as< std::string >( block_header() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); -} - -std::shared_ptr< abstract_backend > rocksdb_backend::clone() const -{ - KOINOS_THROW( internal_exception, "rocksdb_backend, 'clone' not implemented" ); -} - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/backends/rocksdb/rocksdb_iterator.cpp b/libraries/state_db/backends/rocksdb/rocksdb_iterator.cpp deleted file mode 100644 index ce0351c..0000000 --- a/libraries/state_db/backends/rocksdb/rocksdb_iterator.cpp +++ /dev/null @@ -1,131 +0,0 @@ -#include - -#include - -namespace koinos::state_db::backends::rocksdb { - -rocksdb_iterator::rocksdb_iterator( - std::shared_ptr< ::rocksdb::DB > db, - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, - std::shared_ptr< const ::rocksdb::ReadOptions > opts, - std::shared_ptr< object_cache > cache -) : - _db( db ), - _handle( handle ), - _opts( opts ), - _cache( cache ) -{} - -rocksdb_iterator::rocksdb_iterator( const rocksdb_iterator& other ) : - _db( other._db ), - _handle( other._handle ), - _opts( other._opts ), - _cache( other._cache ), - _cache_value( other._cache_value ) -{ - if ( other._iter ) - { - _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); - - if( other._iter->Valid() ) - { - _iter->Seek( other._iter->key() ); - } - } -} - -rocksdb_iterator::~rocksdb_iterator() {} - -const rocksdb_iterator::value_type& rocksdb_iterator::operator*() const -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - - if ( !_cache_value ) - { - update_cache_value(); - } - - return *_cache_value; -} - -const rocksdb_iterator::key_type& 
rocksdb_iterator::key() const -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - - if ( !_key ) - { - update_cache_value(); - } - - return *_key; -} - -abstract_iterator& rocksdb_iterator::operator++() -{ - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - - _iter->Next(); - KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); - - update_cache_value(); - - return *this; -} - -abstract_iterator& rocksdb_iterator::operator--() -{ - if ( !valid() ) - { - _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); - _iter->SeekToLast(); - } - else - { - _iter->Prev(); - KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); - } - - update_cache_value(); - - return *this; -} - -bool rocksdb_iterator::valid() const -{ - return _iter && _iter->Valid(); -} - -std::unique_ptr< abstract_iterator > rocksdb_iterator::copy() const -{ - return std::make_unique< rocksdb_iterator >( *this ); -} - -void rocksdb_iterator::update_cache_value() const -{ - if ( valid() ) - { - auto key_slice = _iter->key(); - auto key = std::make_shared< std::string >( key_slice.data(), key_slice.size() ); - std::lock_guard< std::mutex > lock( _cache->get_mutex() ); - auto [cache_hit, ptr] = _cache->get( *key ); - - if ( cache_hit ) - KOINOS_ASSERT( ptr, rocksdb_internal_exception, "iterator erroneously hit null value in cache" ); - - if ( !ptr ) - { - auto value_slice = _iter->value(); - ptr = _cache->put( *key, std::make_shared< const object_cache::value_type >( value_slice.data(), value_slice.size() ) ); - } - - _cache_value = ptr; - _key = key; - } - else - { - _cache_value.reset(); - _key.reset(); - } -} - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/detail/merge_iterator.cpp b/libraries/state_db/detail/merge_iterator.cpp deleted file mode 100644 index d538bb2..0000000 --- a/libraries/state_db/detail/merge_iterator.cpp +++ /dev/null @@ 
-1,262 +0,0 @@ -#include - -namespace koinos::state_db::detail { - -iterator_wrapper::iterator_wrapper( backends::iterator&& i, uint64_t r, std::shared_ptr< backends::abstract_backend > b ) : - itr( std::move( i ) ), - revision( r ), - backend( b ) -{} - -iterator_wrapper::iterator_wrapper( iterator_wrapper&& i ) : - itr( std::move( i.itr ) ), - revision( i.revision ), - backend( i.backend ) -{} - -iterator_wrapper::iterator_wrapper( const iterator_wrapper& i ) : - itr( i.itr ), - revision( i.revision ), - backend( i.backend ) -{} - -const iterator_wrapper& iterator_wrapper::self() const -{ - return *this; -} - -bool iterator_wrapper::valid() const -{ - return itr != backend->end(); -} - -bool iterator_compare_less::operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const -{ - bool lh_valid = lhs.valid(); - bool rh_valid = rhs.valid(); - - if ( !lh_valid && !rh_valid ) return lhs.revision > rhs.revision; - if ( !lh_valid ) return false; - if ( !rh_valid ) return true; - - return lhs.itr.key() < rhs.itr.key(); -} - -bool iterator_compare_greater::operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const -{ - bool lh_valid = lhs.valid(); - bool rh_valid = rhs.valid(); - - if ( !lh_valid && !rh_valid ) return lhs.revision > rhs.revision; - if ( !lh_valid ) return false; - if ( !rh_valid ) return true; - - return rhs.itr.key() < lhs.itr.key(); -} - -merge_iterator::merge_iterator( const merge_iterator& other ) : - _itr_revision_index( other._itr_revision_index ), - _delta_deque( other._delta_deque ) -{} - -bool merge_iterator::operator==( const merge_iterator& other ) const -{ - // If both iterators are empty, they are true. - // But we use empty merge iterators as an optimization for an end itertor. - // So if one is empty, and the other is all end iterators, they are also equal. 
- if ( _itr_revision_index.size() == 0 && other._itr_revision_index.size() == 0 ) return true; - else if ( _itr_revision_index.size() == 0 ) return other.is_end(); - else if ( other._itr_revision_index.size() == 0 ) return is_end(); - - auto my_begin = _itr_revision_index.begin(); - auto other_begin = other._itr_revision_index.begin(); - - if ( !my_begin->valid() && !other_begin->valid() ) return true; - if ( !my_begin->valid() || !other_begin->valid() ) return false; - if ( my_begin->revision != other_begin->revision ) return false; - - return my_begin->itr == other_begin->itr; -} - -merge_iterator& merge_iterator::operator++() -{ - auto first_itr = _itr_revision_index.begin(); - KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" ); - - _itr_revision_index.modify( first_itr, []( iterator_wrapper& i ){ ++(i.itr); } ); - resolve_conflicts(); - - return *this; -} - -merge_iterator& merge_iterator::operator--() -{ - const auto& order_idx = _itr_revision_index.template get< by_order_revision >(); - - auto head_itr = order_idx.begin(); - std::optional< key_type > head_key; - - if( head_itr->valid() ) - { - head_key = head_itr->itr.key(); - } - - /* We are grabbing the current head value. - * Then iterate over all other iterators and rewind them until they have a value less - * than the current value. One of those values is what we want to decrement to. - */ - const auto& rev_idx = _itr_revision_index.template get< by_revision >(); - for( auto rev_itr = rev_idx.begin(); rev_itr != rev_idx.end(); ++rev_itr ) - { - // Only decrement iterators that have modified objects - if( rev_itr->backend->size() ) - { - auto begin = rev_itr->backend->begin(); - - if( !head_key ) - { - // If there was no valid key, then bring back each iterator once, it is gauranteed to be less than the - // current value (end()). 
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } ); - } - else - { - // Do an initial decrement if the iterator currently points to end() - if( !rev_itr->valid() ) - { - _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } ); - } - - // Decrement back to the first key that is less than the head key - while( rev_itr->itr.key() >= *head_key && rev_itr->itr != begin ) - { - _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } ); - } - } - - // The key at this point is guaranteed to be less than the head key (or at begin() and greator), but it - // might have been modified in a later index. We need to continue decrementing until we have a valid key. - bool dirty = true; - - while( dirty && rev_itr->valid() && rev_itr->itr != begin ) - { - dirty = is_dirty( rev_itr ); - - if( dirty ) - { - _itr_revision_index.modify( _itr_revision_index.iterator_to( *(rev_itr) ), [](iterator_wrapper& i ){ --(i.itr); } ); - } - } - } - } - - const auto& rev_order_idx = _itr_revision_index.template get< by_reverse_order_revision >(); - auto least_itr = rev_order_idx.begin(); - - if( _delta_deque.size() > 1 ) - { - // This next bit works in two modes. - // Some indices may not have had a value less than the previous head, so they will show up first, - // we need to increment through those values until we get the the new valid least value. - if( head_key ) - { - while( least_itr != rev_order_idx.end() && least_itr->valid() - && ( is_dirty( least_itr ) || least_itr->itr.key() >= *head_key ) ) - { - ++least_itr; - } - } - - // Now least_itr points to the new least value, unless it is end() - if( least_itr != rev_order_idx.end() ) - { - ++least_itr; - } - - // Now least_itr points to the next value. All of these are too much less, but are guaranteed to be valid. 
- // All values in this indices one past are gauranteed to be greater than the new least, or invalid by - // modification. We can increment all of them once, and then call resolve_conflicts for the new least value - // to become the head. - while( least_itr != rev_order_idx.end() && least_itr->valid() ) - { - _itr_revision_index.modify( _itr_revision_index.iterator_to( *(least_itr--) ), [](iterator_wrapper& i ){ ++(i.itr); } ); - ++least_itr; - } - - resolve_conflicts(); - } - - return *this; -} - -const merge_iterator::value_type& merge_iterator::operator*() const -{ - return _itr_revision_index.begin()->itr.operator *(); -} - -const merge_iterator::key_type& merge_iterator::key() const -{ - auto first_itr = _itr_revision_index.begin(); - KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" ); - - return first_itr->itr.key(); -} - -void merge_iterator::resolve_conflicts() -{ - auto first_itr = _itr_revision_index.begin(); - bool dirty = true; - - while( dirty && first_itr->valid() ) - { - dirty = is_dirty( first_itr ); - - if( dirty ) - { - _itr_revision_index.modify( first_itr, [](iterator_wrapper& i ){ ++(i.itr); } ); - } - - first_itr = _itr_revision_index.begin(); - } -} - -bool merge_iterator::is_end() const -{ - return std::all_of( _itr_revision_index.begin(), _itr_revision_index.end(), - []( auto& i ){ return !i.valid(); } ); -} - -merge_state::merge_state( std::shared_ptr< state_delta > head ) : - _head( head ) -{} - -merge_iterator merge_state::begin() const -{ - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->begin(); - }); -} - -merge_iterator merge_state::end() const -{ - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->end(); - }); -} - -const merge_state::value_type* merge_state::find( const key_type& key ) const -{ - return _head->find( key ); -} - -merge_iterator merge_state::lower_bound( const key_type& key ) const 
-{ - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->lower_bound( key ); - }); -} - -} // koinos::state_db::detail diff --git a/libraries/state_db/detail/state_delta.cpp b/libraries/state_db/detail/state_delta.cpp deleted file mode 100644 index 67b1807..0000000 --- a/libraries/state_db/detail/state_delta.cpp +++ /dev/null @@ -1,373 +0,0 @@ -#include - -#include - -namespace koinos::state_db::detail { - -using backend_type = state_delta::backend_type; -using value_type = state_delta::value_type; - -state_delta::state_delta( const std::optional< std::filesystem::path >& p ) -{ - if ( p ) - { - auto backend = std::make_shared< backends::rocksdb::rocksdb_backend >(); - backend->open( *p ); - _backend = backend; - } - else - { - _backend = std::make_shared< backends::map::map_backend >(); - } - - _revision = _backend->revision(); - _id = _backend->id(); - _merkle_root = _backend->merkle_root(); -} - -void state_delta::put( const key_type& k, const value_type& v ) -{ - _backend->put( k, v ); -} - -void state_delta::erase( const key_type& k ) -{ - if ( find( k ) ) - { - _backend->erase( k ); - _removed_objects.insert( k ); - } -} - -const value_type* state_delta::find( const key_type& key ) const -{ - if ( auto val_ptr = _backend->get( key ); val_ptr ) - return val_ptr; - - if ( is_removed( key ) ) - return nullptr; - - return is_root() ? nullptr : _parent->find( key ); -} - -void state_delta::squash() -{ - if ( is_root() ) - return; - - // If an object is removed here and exists in the parent, it needs to only be removed in the parent - // If an object is modified here, but removed in the parent, it needs to only be modified in the parent - // These are O(m log n) operations. 
Because of this, squash should only be called from anonymouse state - // nodes, whose modifications are much smaller - for ( const key_type& r_key : _removed_objects ) - { - _parent->_backend->erase( r_key ); - - if ( !_parent->is_root() ) - { - _parent->_removed_objects.insert( r_key ); - } - } - - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) - { - _parent->_backend->put( itr.key(), *itr ); - - if ( !_parent->is_root() ) - { - _parent->_removed_objects.erase( itr.key() ); - } - } -} - -void state_delta::commit() -{ - /** - * commit works in two distinct phases. The first is head recursion until we are at the root - * delta. At the root, we grab the backend and begin a write batch that will encompass all - * state writes and the final write of the metadata. - * - * The second phase is popping off the stack, writing state to the backend. After all deltas - * have been written to the backend, we write metadata to the backend and end the write batch. - * - * The result is this delta becomes the new root delta and state is written to the root backend - * atomically. - */ - KOINOS_ASSERT( !is_root(), internal_error, "cannot commit root" ); - - std::vector< std::shared_ptr< state_delta > > node_stack; - auto current_node = shared_from_this(); - - while ( current_node ) - { - node_stack.push_back( current_node ); - current_node = current_node->_parent; - } - - // Because we already asserted we were not root, there will always exist a minimum of two nodes in the stack, - // this and root. 
- auto backend = node_stack.back()->_backend; - node_stack.back()->_backend.reset(); - node_stack.pop_back(); - - // Start the write batch - backend->start_write_batch(); - - // While there are nodes on the stack, write them to the backend - while ( node_stack.size() ) - { - auto& node = node_stack.back(); - - for ( const key_type& r_key : node->_removed_objects ) - { - backend->erase( r_key ); - } - - for ( auto itr = node->_backend->begin(); itr != node->_backend->end(); ++itr ) - { - backend->put( itr.key(), *itr ); - } - - node_stack.pop_back(); - } - - // Update metadata on the backend - backend->set_block_header( block_header() ); - backend->set_revision( _revision ); - backend->set_id( _id ); - backend->set_merkle_root( merkle_root() ); - backend->store_metadata(); - - // End the write batch making the entire merge atomic - backend->end_write_batch(); - - // Reset local variables to match new status as root delta - _removed_objects.clear(); - _backend = backend; - _parent.reset(); -} - -void state_delta::clear() -{ - _backend->clear(); - _removed_objects.clear(); - - _revision = 0; - _id = crypto::multihash::zero( crypto::multicodec::sha2_256 ); -} - -bool state_delta::is_modified( const key_type& k ) const -{ - return _backend->get( k ) || _removed_objects.find( k ) != _removed_objects.end(); -} - -bool state_delta::is_removed( const key_type& k ) const -{ - return _removed_objects.find( k ) != _removed_objects.end(); -} - -bool state_delta::is_root() const -{ - return !_parent; -} - -uint64_t state_delta::revision() const -{ - return _revision; -} - -void state_delta::set_revision( uint64_t revision ) -{ - _revision = revision; - if ( is_root() ) - { - _backend->set_revision( revision ); - } -} - -bool state_delta::is_finalized() const -{ - return _finalized; -} - -void state_delta::finalize() -{ - _finalized = true; -} - -std::condition_variable_any& state_delta::cv() -{ - return _cv; -} - -std::timed_mutex& state_delta::cv_mutex() -{ - return _cv_mutex; 
-} - -crypto::multihash state_delta::merkle_root() const -{ - if ( !_merkle_root ) - { - std::vector< std::string > object_keys; - object_keys.reserve( _backend->size() + _removed_objects.size() ); - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) - { - object_keys.push_back( itr.key() ); - } - - for ( const auto& removed : _removed_objects ) - { - object_keys.push_back( removed ); - } - - std::sort( - object_keys.begin(), - object_keys.end() - ); - - std::vector< crypto::multihash > merkle_leafs; - merkle_leafs.reserve( object_keys.size() * 2 ); - - for ( const auto& key : object_keys ) - { - merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, key ) ); - auto val_ptr = _backend->get( key ); - merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, val_ptr ? *val_ptr : std::string() ) ); - } - - _merkle_root = crypto::merkle_tree( crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - } - - return *_merkle_root; -} - -const protocol::block_header& state_delta::block_header() const -{ - return _backend->block_header(); -} - -std::shared_ptr< state_delta > state_delta::make_child( const state_node_id& id, const protocol::block_header& header ) -{ - auto child = std::make_shared< state_delta >(); - child->_parent = shared_from_this(); - child->_id = id; - child->_revision = _revision + 1; - child->_backend = std::make_shared< backends::map::map_backend >(); - child->_backend->set_block_header( header ); - - return child; -} - -std::shared_ptr< state_delta > state_delta::clone( const state_node_id& id, const protocol::block_header& header ) -{ - auto new_node = std::make_shared< state_delta >(); - new_node->_parent = _parent; - new_node->_backend = _backend->clone(); - new_node->_removed_objects = _removed_objects; - - new_node->_id = id; - new_node->_revision = _revision; - new_node->_merkle_root = _merkle_root; - - new_node->_finalized = _finalized; - - new_node->_backend->set_id( id ); - 
new_node->_backend->set_revision( _revision ); - new_node->_backend->set_block_header( header ); - - if ( _merkle_root ) - { - new_node->_backend->set_merkle_root( *_merkle_root ); - } - - return new_node; -} - -const std::shared_ptr< backend_type > state_delta::backend() const -{ - return _backend; -} - -const state_node_id& state_delta::id() const -{ - return _id; -} - -const state_node_id& state_delta::parent_id() const -{ - static const state_node_id null_id; - return _parent ? _parent->_id : null_id; -} - -std::shared_ptr< state_delta > state_delta::parent() const -{ - return _parent; -} - -bool state_delta::is_empty() const -{ - if ( _backend->size() ) - return false; - else if ( _parent ) - return _parent->is_empty(); - - return true; -} - -std::shared_ptr< state_delta > state_delta::get_root() -{ - if ( !is_root() ) - { - if ( _parent->is_root() ) - return _parent; - else - return _parent->get_root(); - } - - return std::shared_ptr< state_delta >(); -} - -std::vector< protocol::state_delta_entry > state_delta::get_delta_entries() const -{ - std::vector< std::string > object_keys; - object_keys.reserve( _backend->size() + _removed_objects.size() ); - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) { - object_keys.push_back( itr.key() ); - } - - for ( const auto &removed : _removed_objects ) { - object_keys.push_back( removed ); - } - - std::sort( object_keys.begin(), object_keys.end() ); - - std::vector< protocol::state_delta_entry > deltas; - deltas.reserve( object_keys.size() ); - - for ( const auto &key : object_keys ) { - protocol::state_delta_entry entry; - - // Deserialize the key into a database_key object - koinos::chain::database_key db_key; - if ( db_key.ParseFromString( key ) ) - { - entry.mutable_object_space()->set_system( db_key.space().system() ); - entry.mutable_object_space()->set_zone( db_key.space().zone() ); - entry.mutable_object_space()->set_id( db_key.space().id() ); - - entry.set_key( db_key.key() ); - auto value = 
_backend->get( key ); - - // Set the optional field if not null - if ( value != nullptr ) - entry.set_value( *value ); - - deltas.push_back( entry ); - } - } - - return deltas; -} - -} // koinos::state_db::detail diff --git a/libraries/state_db/include/koinos/state_db/backends/backend.hpp b/libraries/state_db/include/koinos/state_db/backends/backend.hpp deleted file mode 100644 index e579d56..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/backend.hpp +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once - -#include - -#include -#include - -namespace koinos::state_db::backends { - -class abstract_backend -{ - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - using size_type = detail::size_type; - - abstract_backend(); - virtual ~abstract_backend() {}; - - virtual iterator begin() = 0; - virtual iterator end() = 0; - - virtual void put( const key_type& k, const value_type& v ) = 0; - virtual const value_type* get( const key_type& ) const = 0; - virtual void erase( const key_type& k ) = 0; - virtual void clear() = 0; - - virtual size_type size() const = 0; - bool empty() const; - - virtual iterator find( const key_type& k ) = 0; - virtual iterator lower_bound( const key_type& k ) = 0; - - size_type revision() const; - void set_revision( size_type ); - - const crypto::multihash& id() const; - void set_id( const crypto::multihash& ); - - const crypto::multihash& merkle_root() const; - void set_merkle_root( const crypto::multihash& ); - - const protocol::block_header& block_header() const; - void set_block_header( const protocol::block_header& ); - - virtual void start_write_batch() = 0; - virtual void end_write_batch() = 0; - - virtual void store_metadata() = 0; - - virtual std::shared_ptr< abstract_backend > clone() const = 0; - - private: - size_type _revision = 0; - crypto::multihash _id; - crypto::multihash _merkle_root; - protocol::block_header _header; -}; - -} // koinos::state_db::backends diff --git 
a/libraries/state_db/include/koinos/state_db/backends/iterator.hpp b/libraries/state_db/include/koinos/state_db/backends/iterator.hpp deleted file mode 100644 index c3c96e1..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/iterator.hpp +++ /dev/null @@ -1,62 +0,0 @@ -#pragma once - -#include - -#include - -namespace koinos::state_db::backends { - -class iterator; - -class abstract_iterator -{ - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - - virtual ~abstract_iterator() {}; - - virtual const value_type& operator*() const = 0; - - virtual const key_type& key() const = 0; - - virtual abstract_iterator& operator++() = 0; - virtual abstract_iterator& operator--() = 0; - - private: - friend class iterator; - - virtual bool valid() const = 0; - virtual std::unique_ptr< abstract_iterator > copy() const = 0; -}; - -class iterator final -{ - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - - iterator( std::unique_ptr< abstract_iterator > ); - iterator( const iterator& other ); - iterator( iterator&& other ); - - const value_type& operator*() const; - - const key_type& key() const; - const value_type& value() const; - - iterator& operator++(); - iterator& operator--(); - - iterator& operator=( iterator&& other ); - - friend bool operator==( const iterator& x, const iterator& y ); - friend bool operator!=( const iterator& x, const iterator& y ); - - private: - bool valid() const; - - std::unique_ptr< abstract_iterator > _itr; -}; - -} // koinos::state_db::backends diff --git a/libraries/state_db/include/koinos/state_db/backends/map/map_backend.hpp b/libraries/state_db/include/koinos/state_db/backends/map/map_backend.hpp deleted file mode 100644 index 83a4de1..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/map/map_backend.hpp +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -#include -#include - -namespace koinos::state_db::backends::map { - -class map_backend 
final : public abstract_backend { - public: - using key_type = abstract_backend::key_type; - using value_type = abstract_backend::value_type; - using size_type = abstract_backend::size_type; - - map_backend(); - virtual ~map_backend() override; - - // Iterators - virtual iterator begin() noexcept override; - virtual iterator end() noexcept override; - - // Modifiers - virtual void put( const key_type& k, const value_type& v ) override; - virtual const value_type* get( const key_type& ) const override; - virtual void erase( const key_type& k ) override; - virtual void clear() noexcept override; - - virtual size_type size() const noexcept override; - - // Lookup - virtual iterator find( const key_type& k ) override; - virtual iterator lower_bound( const key_type& k ) override; - - virtual void start_write_batch() override; - virtual void end_write_batch() override; - - virtual void store_metadata() override; - - virtual std::shared_ptr< abstract_backend > clone() const override; - - private: - std::map< key_type, value_type > _map; - protocol::block_header _header; -}; - -} // koinos::state_db::backends::map diff --git a/libraries/state_db/include/koinos/state_db/backends/map/map_iterator.hpp b/libraries/state_db/include/koinos/state_db/backends/map/map_iterator.hpp deleted file mode 100644 index 854c492..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/map/map_iterator.hpp +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include - -#include - -namespace koinos::state_db::backends::map { - -class map_backend; - -class map_iterator final : public abstract_iterator -{ - public: - using value_type = abstract_iterator::value_type; - using map_impl = std::map< detail::key_type, detail::value_type >; - using iterator_impl = map_impl::iterator; - - map_iterator( std::unique_ptr< iterator_impl > itr, const map_impl& map ); - ~map_iterator(); - - virtual const value_type& operator*() const override; - - virtual const key_type& key() const override; - - 
virtual abstract_iterator& operator++() override; - virtual abstract_iterator& operator--() override; - - private: - virtual bool valid() const override; - virtual std::unique_ptr< abstract_iterator > copy() const override; - - std::unique_ptr< iterator_impl > _itr; - const map_impl& _map; -}; - -} // koinos::state_db::backends::map diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/object_cache.hpp b/libraries/state_db/include/koinos/state_db/backends/rocksdb/object_cache.hpp deleted file mode 100644 index 42cbb60..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/rocksdb/object_cache.hpp +++ /dev/null @@ -1,52 +0,0 @@ -#pragma once - -#include - -#include - -#include -#include -#include -#include -#include - -namespace koinos::state_db::backends::rocksdb { - -class object_cache -{ - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - - private: - using lru_list_type = std::list< key_type >; - using value_map_type = - std::map< - key_type, - std::pair< - std::shared_ptr< const value_type >, - typename lru_list_type::iterator - > - >; - - lru_list_type _lru_list; - value_map_type _object_map; - std::size_t _cache_size = 0; - const std::size_t _cache_max_size; - std::mutex _mutex; - - public: - object_cache( std::size_t size ); - ~object_cache(); - - std::pair< bool, std::shared_ptr< const value_type > > get( const key_type& k ); - std::shared_ptr< const value_type > put( const key_type& k, std::shared_ptr< const value_type > v ); - - void remove( const key_type& k ); - - void clear(); - - std::mutex& get_mutex(); -}; - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp b/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp deleted file mode 100644 index 5768d0d..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp +++ /dev/null @@ -1,66 +0,0 
@@ -#pragma once - -#include -#include -#include -#include - -#include - -#include -#include -#include - -namespace koinos::state_db::backends::rocksdb { - -class rocksdb_backend final : public abstract_backend { - public: - using key_type = abstract_backend::key_type; - using value_type = abstract_backend::value_type; - using size_type = abstract_backend::size_type; - - rocksdb_backend(); - ~rocksdb_backend(); - - void open( const std::filesystem::path& p ); - void close(); - void flush(); - - virtual void start_write_batch() override; - virtual void end_write_batch() override; - - // Iterators - virtual iterator begin() override; - virtual iterator end() override; - - // Modifiers - virtual void put( const key_type& k, const value_type& v ) override; - virtual const value_type* get( const key_type& ) const override; - virtual void erase( const key_type& k ) override; - virtual void clear() override; - - virtual size_type size() const override; - - // Lookup - virtual iterator find( const key_type& k ) override; - virtual iterator lower_bound( const key_type& k ) override; - - virtual void store_metadata() override; - - virtual std::shared_ptr< abstract_backend > clone() const override; - - private: - void load_metadata(); - - using column_handles = std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > >; - - std::shared_ptr< ::rocksdb::DB > _db; - std::optional< ::rocksdb::WriteBatch > _write_batch; - column_handles _handles; - ::rocksdb::WriteOptions _wopts; - std::shared_ptr< ::rocksdb::ReadOptions > _ropts; - mutable std::shared_ptr< object_cache > _cache; - size_type _size = 0; -}; - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp b/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp deleted file mode 100644 index 9de070c..0000000 --- a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp +++ /dev/null @@ 
-1,51 +0,0 @@ -#pragma once - -#include -#include - -#include - -#include - -namespace koinos::state_db::backends::rocksdb { - -class rocksdb_backend; - -class rocksdb_iterator final : public abstract_iterator -{ - public: - using value_type = abstract_iterator::value_type; - - rocksdb_iterator( - std::shared_ptr< ::rocksdb::DB > db, - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, - std::shared_ptr< const ::rocksdb::ReadOptions > opts, - std::shared_ptr< object_cache > cache ); - rocksdb_iterator( const rocksdb_iterator& other ); - virtual ~rocksdb_iterator() override; - - virtual const value_type& operator*() const override; - - virtual const key_type& key() const override; - - virtual abstract_iterator& operator++() override; - virtual abstract_iterator& operator--() override; - - private: - friend class rocksdb_backend; - - virtual bool valid() const override; - virtual std::unique_ptr< abstract_iterator > copy() const override; - - void update_cache_value() const; - - std::shared_ptr< ::rocksdb::DB > _db; - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > _handle; - std::unique_ptr< ::rocksdb::Iterator > _iter; - std::shared_ptr< const ::rocksdb::ReadOptions > _opts; - mutable std::shared_ptr< object_cache > _cache; - mutable std::shared_ptr< const value_type > _cache_value; - mutable std::shared_ptr< const key_type > _key; -}; - -} // koinos::state_db::backends::rocksdb diff --git a/libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp b/libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp deleted file mode 100644 index efeea81..0000000 --- a/libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp +++ /dev/null @@ -1,163 +0,0 @@ -#pragma once - -#include - -#include - -#include -#include -#include -#include -#include - -#include - -namespace koinos::state_db::detail { - -using namespace boost::multi_index; - -struct iterator_wrapper -{ - iterator_wrapper( backends::iterator&& i, uint64_t r, 
std::shared_ptr< backends::abstract_backend > b ); - iterator_wrapper( iterator_wrapper&& i ); - iterator_wrapper( const iterator_wrapper& i ); - - const iterator_wrapper& self() const; - bool valid() const; - - backends::iterator itr; - std::shared_ptr< backends::abstract_backend > backend; - uint64_t revision; -}; - -// Uses revision as a tiebreaker only for when both iterators are invalid -// to enforce a total ordering on this comparator. The composite key on -// revision is still needed for the case when iterators are valid and equal. -// (i.e. lhs < rhs == false && rhs < lhs == false ) -struct iterator_compare_less -{ - bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; -}; - -struct iterator_compare_greater -{ - bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; -}; - -class merge_iterator : - public boost::bidirectional_iterator_helper< - merge_iterator, - typename state_delta::value_type, - std::size_t, - const typename state_delta::value_type*, - const typename state_delta::value_type& > -{ - public: - using key_type = state_delta::key_type; - using value_type = state_delta::value_type; - - private: - using iterator_type = backends::iterator; - using state_delta_ptr = std::shared_ptr< state_delta >; - - struct by_order_revision; - struct by_reverse_order_revision; - struct by_revision; - - using iter_revision_index_type = multi_index_container< - iterator_wrapper, - indexed_by< - ordered_unique< tag< by_order_revision >, - composite_key< iterator_wrapper, - const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, - member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > - >, - composite_key_compare< iterator_compare_less, std::greater< uint64_t > > - >, - ordered_unique< tag< by_reverse_order_revision >, - composite_key< iterator_wrapper, - const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, - member< iterator_wrapper, 
uint64_t, &iterator_wrapper::revision > - >, - composite_key_compare< iterator_compare_greater, std::greater< uint64_t > > - >, - ordered_unique< tag< by_revision >, member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > > - > - >; - - iter_revision_index_type _itr_revision_index; - std::deque< state_delta_ptr > _delta_deque; - - public: - template< typename Initializer > - merge_iterator( state_delta_ptr head, Initializer&& init ) - { - KOINOS_ASSERT( head, internal_error, "cannot create a merge iterator on a null delta" ); - auto current_delta = head; - - do - { - _delta_deque.push_front( current_delta ); - - _itr_revision_index.emplace( - iterator_wrapper( - std::move( init( current_delta->backend() ) ), - current_delta->revision(), - current_delta->backend() - ) - ); - - current_delta = current_delta->parent(); - } while( current_delta ); - - resolve_conflicts(); - } - - merge_iterator( const merge_iterator& other ); - - bool operator ==( const merge_iterator& other ) const; - - merge_iterator& operator++(); - merge_iterator& operator--(); - - const value_type& operator*() const; - - const key_type& key() const; - - private: - template< typename ItrType > - bool is_dirty( ItrType itr ) - { - bool dirty = false; - - for ( auto i = _delta_deque.size() - 1; itr->revision < _delta_deque[i]->revision() && !dirty; --i ) - { - dirty = _delta_deque[i]->is_modified( itr->itr.key() ); - } - - return dirty; - } - - void resolve_conflicts(); - bool is_end() const; -}; - -class merge_state -{ - public: - using key_type = state_delta::key_type; - using value_type = state_delta::value_type; - - merge_state( std::shared_ptr< state_delta > head ); - - merge_iterator begin() const; - merge_iterator end() const; - - const value_type* find( const key_type& key ) const; - merge_iterator lower_bound( const key_type& key ) const; - - private: - std::shared_ptr< state_delta > _head; -}; - -} // koinos::state_db::detail diff --git 
a/libraries/state_db/include/koinos/state_db/detail/state_delta.hpp b/libraries/state_db/include/koinos/state_db/detail/state_delta.hpp deleted file mode 100644 index 817b702..0000000 --- a/libraries/state_db/include/koinos/state_db/detail/state_delta.hpp +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace koinos::state_db::detail { - - class state_delta : public std::enable_shared_from_this< state_delta > - { - public: - using backend_type = backends::abstract_backend; - using key_type = backend_type::key_type; - using value_type = backend_type::value_type; - - private: - std::shared_ptr< state_delta > _parent; - - std::shared_ptr< backend_type > _backend; - std::unordered_set< key_type > _removed_objects; - - state_node_id _id; - uint64_t _revision = 0; - mutable std::optional< crypto::multihash > _merkle_root; - - bool _finalized = false; - - std::timed_mutex _cv_mutex; - std::condition_variable_any _cv; - - public: - state_delta() = default; - state_delta( const std::optional< std::filesystem::path >& p ); - ~state_delta() = default; - - void put( const key_type& k, const value_type& v ); - void erase( const key_type& k ); - const value_type* find( const key_type& key ) const; - - void squash(); - void commit(); - - void clear(); - - bool is_modified( const key_type& k ) const; - bool is_removed( const key_type& k ) const; - bool is_root() const; - bool is_empty() const; - - uint64_t revision() const; - void set_revision( uint64_t revision ); - - bool is_finalized() const; - void finalize(); - - std::condition_variable_any& cv(); - std::timed_mutex& cv_mutex(); - - crypto::multihash merkle_root() const; - std::vector< protocol::state_delta_entry > get_delta_entries() const; - - const state_node_id& id() const; - const state_node_id& parent_id() const; - std::shared_ptr< state_delta > parent() const; - const protocol::block_header& 
block_header() const; - - std::shared_ptr< state_delta > make_child( const state_node_id& id = state_node_id(), const protocol::block_header& header = protocol::block_header() ); - std::shared_ptr< state_delta > clone( const state_node_id& id, const protocol::block_header& header ); - - const std::shared_ptr< backend_type > backend() const; - - private: - void commit_helper(); - - std::shared_ptr< state_delta > get_root(); - }; - -} // koinos::state_db::detail diff --git a/libraries/state_db/include/koinos/state_db/state_db.hpp b/libraries/state_db/include/koinos/state_db/state_db.hpp deleted file mode 100644 index f076b97..0000000 --- a/libraries/state_db/include/koinos/state_db/state_db.hpp +++ /dev/null @@ -1,463 +0,0 @@ - -#pragma once -#include - -#include - -#include - -#include -#include -#include -#include -#include -#include - -namespace koinos::state_db { - -namespace detail { - -class database_impl; -class state_node_impl; -class anonymous_state_node_impl; - -} // detail - -class abstract_state_node; -class anonymous_state_node; - -using abstract_state_node_ptr = std::shared_ptr< abstract_state_node >; -using anonymous_state_node_ptr = std::shared_ptr< anonymous_state_node >; - -enum class fork_resolution_algorithm -{ - fifo, - block_time, - pob -}; - -/** - * Allows querying the database at a particular checkpoint. - */ -class abstract_state_node -{ - public: - abstract_state_node(); - virtual ~abstract_state_node(); - - /** - * Fetch an object if one exists. - * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - args.key is copied into result.key - */ - const object_value* get_object( const object_space& space, const object_key& key ) const; - - /** - * Get the next object. 
- * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - Found key is written into result - */ - std::pair< const object_value*, const object_key > get_next_object( const object_space& space, const object_key& key ) const; - - /** - * Get the previous object. - * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - Found key is written into result - */ - std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, const object_key& key ) const; - - /** - * Write an object into the state_node. - * - * - Fail if node is not writable. - * - If object exists, object is overwritten. - */ - int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); - - /** - * Remove an object from the state_node - */ - int64_t remove_object( const object_space& space, const object_key& key ); - - /** - * Return true if the node is writable. - */ - bool is_finalized() const; - - /** - * Return the merkle root of writes on this state node - */ - crypto::multihash merkle_root() const; - - /** - * Returns the state delta entries associated with this state node - */ - std::vector< protocol::state_delta_entry > get_delta_entries() const; - - /** - * Returns an anonymous state node with this node as its parent. 
- */ - anonymous_state_node_ptr create_anonymous_node(); - - virtual const state_node_id& id() const = 0; - virtual const state_node_id& parent_id() const = 0; - virtual uint64_t revision() const = 0; - virtual abstract_state_node_ptr parent() const = 0; - virtual const protocol::block_header& block_header() const = 0; - - friend class detail::database_impl; - - protected: - virtual std::shared_ptr< abstract_state_node > shared_from_derived() = 0; - - std::unique_ptr< detail::state_node_impl > _impl; -}; - -class anonymous_state_node final : public abstract_state_node, public std::enable_shared_from_this< anonymous_state_node > -{ - public: - anonymous_state_node(); - ~anonymous_state_node(); - - const state_node_id& id() const override; - const state_node_id& parent_id() const override; - uint64_t revision() const override; - abstract_state_node_ptr parent() const override; - const protocol::block_header& block_header() const override; - - void commit(); - void reset(); - - friend class abstract_state_node; - - protected: - std::shared_ptr< abstract_state_node > shared_from_derived()override; - - private: - abstract_state_node_ptr _parent; -}; - -/** - * Allows querying the database at a particular checkpoint. 
- */ -class state_node final : public abstract_state_node, public std::enable_shared_from_this< state_node > -{ - public: - state_node(); - ~state_node(); - - const state_node_id& id() const override; - const state_node_id& parent_id() const override; - uint64_t revision() const override; - abstract_state_node_ptr parent() const override; - const protocol::block_header& block_header() const override; - - protected: - std::shared_ptr< abstract_state_node > shared_from_derived()override; -}; - -using state_node_ptr = std::shared_ptr< state_node >; -using genesis_init_function = std::function< void( state_node_ptr ) >; -using fork_list = std::vector< state_node_ptr >; -using state_node_comparator_function = std::function< state_node_ptr( fork_list&, state_node_ptr, state_node_ptr ) >; -using shared_lock_ptr = std::shared_ptr< const std::shared_lock< std::shared_mutex > >; -using unique_lock_ptr = std::shared_ptr< const std::unique_lock< std::shared_mutex > >; - -state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); -state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); -state_node_ptr pob_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); - -/** - * database is designed to provide parallel access to the database across - * different states. - * - * It does by tracking positive state deltas, which can be merged on the fly - * at read time to return the correct state of the database. A database - * checkpoint is represented by the state_node class. Reads and writes happen - * against a state_node. - * - * States are organized as a tree with the assumption that one path wins out - * over time and cousin paths are discarded as the root is advanced. - * - * Currently, database is not thread safe. That is, calls directly on database - * are not thread safe. (i.e. 
deleting a node concurrently to creating a new - * node can leave database in an undefined state) - * - * Concurrency across state nodes is supported natively by the implementation - * without locks. Writes on a single state node need to be serialized, but - * reads are implicitly parallel. - * - * TODO: Either extend the design of database to support concurrent access - * or implement some locking mechanism for access to the fork multi - * index container. - * - * There is an additional corner case that is difficult to address. - * - * Upon squashing a state node, readers may be reading from the node that - * is being squashed or an intermediate node between root and that node. - * Relatively speaking, this should happen infrequently (on the order of once - * per some number of seconds). As such, whatever guarantees concurrency - * should heavily favor readers. Writing can happen lazily, preferably when - * there is no contention from readers at all. - */ -class database final -{ - public: - database(); - ~database(); - - shared_lock_ptr get_shared_lock() const; - - unique_lock_ptr get_unique_lock() const; - - /** - * Open the database. - */ - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ); - - /** - * Open the database. - */ - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ); - - /** - * Close the database. - */ - void close( const unique_lock_ptr& lock ); - - /** - * Reset the database. 
- */ - void reset( const unique_lock_ptr& lock ); - - /** - * Get an ancestor of a node at a particular revision - */ - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const; - - /** - * Get an ancestor of a node at a particular revision - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const; - - /** - * Get the state_node for the given state_node_id. - * - * Return an empty pointer if no node for the given id exists. - */ - state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; - - /** - * Get the state_node for the given state_node_id. - * - * Return an empty pointer if no node for the given id exists. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; - - /** - * Create a writable state_node. - * - * - If parent_id refers to a writable node, fail. - * - Otherwise, return a new writable node. - * - Writing to the returned node will not modify the parent node. - * - * If the parent is subsequently discarded, database preserves - * as much of the parent's state storage as necessary to continue - * to serve queries on any (non-discarded) children. 
A discarded - * parent node's state may internally be merged into a child's - * state storage area, allowing the parent's state storage area - * to be freed. This merge may occur immediately, or it may be - * deferred or parallelized. - */ - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - - /** - * Create a writable state_node. - * - * - If parent_id refers to a writable node, fail. - * - Otherwise, return a new writable node. - * - Writing to the returned node will not modify the parent node. - * - * If the parent is subsequently discarded, database preserves - * as much of the parent's state storage as necessary to continue - * to serve queries on any (non-discarded) children. A discarded - * parent node's state may internally be merged into a child's - * state storage area, allowing the parent's state storage area - * to be freed. This merge may occur immediately, or it may be - * deferred or parallelized. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - - /** - * Clone a node with a new id and block header. - * - * Cannot clone a finalized node. - */ - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - - /** - * Clone a node with a new id and block header. - * - * Cannot clone a finalized node. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. 
Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - - /** - * Finalize a node. The node will no longer be writable. - */ - void finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ); - - /** - * Finalize a node. The node will no longer be writable. - */ - void finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Discard the node; it can no longer be used. - * - * If the node has any children, they too will be deleted because - * there will no longer exist a path from root to those nodes. - * - * This will fail if the node you are deleting would cause the - * current head node to be deleted. - */ - void discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ); - - /** - * Discard the node; it can no longer be used. - * - * If the node has any children, they too will be deleted because - * there will no longer exist a path from root to those nodes. - * - * This will fail if the node you are deleting would cause the - * current head node to be deleted. - */ - void discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Squash the node into the root state, committing it. - * Branching state between this node and its ancestor will be discarded - * and no longer accessible. - * - * It is the responsibility of the caller to ensure no readers or writers - * are accessing affected nodes by this call. - * - * TODO: Implement thread safety within commit node to make - * database thread safe for all callers. - */ - void commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Get and return the current "head" node. - * - * Head is determined by longest chain. Oldest - * chain wins in a tie of length. 
Only finalized - * nodes are eligible to become head. - */ - state_node_ptr get_head( const shared_lock_ptr& lock ) const; - - /** - * Get and return the current "head" node. - * - * Head is determined by longest chain. Oldest - * chain wins in a tie of length. Only finalized - * nodes are eligible to become head. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_head( const unique_lock_ptr& lock ) const; - - /** - * Get and return a vector of all fork heads. - * - * Fork heads are any finalized nodes that do - * not have finalized children. - */ - std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; - - /** - * Get and return a vector of all fork heads. - * - * Fork heads are any finalized nodes that do - * not have finalized children. - * - * WARNING: The state nodes returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; - - /** - * Get and return a vector of all nodes. - */ - std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; - - /** - * Get and return a vector of all nodes. - * - * WARNING: The state nodes returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; - - /** - * Get and return the current "root" node. 
- * - * All state nodes are guaranteed to a descendant of root. - */ - state_node_ptr get_root( const shared_lock_ptr& lock ) const; - - /** - * Get and return the current "root" node. - * - * All state nodes are guaranteed to a descendant of root. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_root( const unique_lock_ptr& lock ) const; - - private: - std::unique_ptr< detail::database_impl > impl; -}; - -} // koinos::state_db diff --git a/libraries/state_db/state_db.cpp b/libraries/state_db/state_db.cpp deleted file mode 100644 index 0ecfde6..0000000 --- a/libraries/state_db/state_db.cpp +++ /dev/null @@ -1,1389 +0,0 @@ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace std { - template<> - struct hash< koinos::crypto::multihash > - { - std::size_t operator()( const koinos::crypto::multihash& mh ) const - { - static const std::hash< std::string > hash_fn; - return hash_fn( koinos::util::converter::as< std::string >( mh ) ); - } - }; - -} - -namespace koinos::chain { - bool operator==( const object_space& lhs, const object_space& rhs ) - { - return lhs.system() == rhs.system() - && lhs.zone() == rhs.zone() - && lhs.id() == rhs.id(); - } - - bool operator<( const object_space& lhs, const object_space& rhs ) - { - if ( lhs.system() < rhs.system() ) - { - return true; - } - else if ( lhs.system() > rhs.system() ) - { - return false; - } - - if ( lhs.zone() < rhs.zone() ) - { - return true; - } - else if ( lhs.system() > rhs.system() ) - { - return false; - } - - return lhs.id() < rhs.id(); - } -} - -namespace koinos::state_db { - -namespace detail { - -struct by_id; -struct by_revision; -struct by_parent; - -using state_delta_ptr = 
std::shared_ptr< state_delta >; - -using state_multi_index_type = boost::multi_index_container< - state_delta_ptr, - boost::multi_index::indexed_by< - boost::multi_index::ordered_unique< - boost::multi_index::tag< by_id >, - boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::id > - >, - boost::multi_index::ordered_non_unique< - boost::multi_index::tag< by_parent >, - boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::parent_id > - >, - boost::multi_index::ordered_non_unique< - boost::multi_index::tag< by_revision >, - boost::multi_index::const_mem_fun< state_delta, uint64_t, &state_delta::revision > - > - > ->; - -const object_key null_key = object_key(); - -/** - * Private implementation of state_node interface. - * - * Maintains a pointer to database_impl, - * only allows reads / writes if the node corresponds to the DB's current state. - */ -class state_node_impl final -{ - public: - state_node_impl() {} - ~state_node_impl() {} - - const object_value* get_object( const object_space& space, const object_key& key ) const; - std::pair< const object_value*, const object_key > get_next_object( const object_space& space, const object_key& key ) const; - std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, const object_key& key ) const; - int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); - int64_t remove_object( const object_space& space, const object_key& key ); - crypto::multihash merkle_root() const; - std::vector< protocol::state_delta_entry > get_delta_entries() const; - - state_delta_ptr _state; - shared_lock_ptr _lock; -}; - -/** - * Private implementation of database interface. - * - * This class relies heavily on using chainbase as a backing store. - * Only one state_node can be active at a time. This is enforced by - * _current_node which is a weak_ptr. 
New nodes will throw - * NodeNotExpired exception if a - */ -class database_impl final -{ - public: - database_impl() {} - ~database_impl() { close_lockless(); } - - shared_lock_ptr get_shared_lock() const; - unique_lock_ptr get_unique_lock() const; - bool verify_shared_lock( const shared_lock_ptr& lock ) const; - bool verify_unique_lock( const unique_lock_ptr& lock ) const; - - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ); - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ); - void open_lockless( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp ); - void close( const unique_lock_ptr& lock ); - void close_lockless(); - - void reset( const unique_lock_ptr& lock ); - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child, const shared_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child, const unique_lock_ptr& lock ) const; - state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; - state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; - state_node_ptr get_node_lockless( const state_node_id& node_id ) const; - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - state_node_ptr clone_node( const state_node_id& 
node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - void finalize_node( const state_node_id& node, const shared_lock_ptr& lock ); - void finalize_node( const state_node_id& node, const unique_lock_ptr& lock ); - void discard_node( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist, const shared_lock_ptr& lock ); - void discard_node( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist, const unique_lock_ptr& lock ); - void discard_node_lockless( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist ); - void commit_node( const state_node_id& node, const unique_lock_ptr& lock ); - - state_node_ptr get_head( const shared_lock_ptr& lock ) const; - state_node_ptr get_head( const unique_lock_ptr& lock ) const; - state_node_ptr get_head_lockless() const; - std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; - state_node_ptr get_root( const shared_lock_ptr& lock ) const; - state_node_ptr get_root( const unique_lock_ptr& lock ) const; - state_node_ptr get_root_lockless() const; - - bool is_open() const; - - std::optional< std::filesystem::path > _path; - genesis_init_function _init_func = nullptr; - state_node_comparator_function _comp = nullptr; - - state_multi_index_type _index; - state_delta_ptr _head; - std::map< state_node_id, state_delta_ptr > _fork_heads; - state_delta_ptr _root; - - /* Regarding mutexes used for synchronizing state_db... - * - * There are three mutexes that can be locked. 
They are: - * - _index_mutex (locks access to _index) - * - _node_mutex (locks access to creating new state_node_ptrs) - * - state_delta::cv_mutex() (locks access to a state_delta cv) - * - * Shared locks on the _node_mutex must exist beyond the scope of calls to state_db, - * so _node_mutex must be locked first. - * - * Consequently, _index_mutex must be locked last. All functions in state_db MUST - * follow this convention or we risk deadlock. - */ - mutable std::timed_mutex _index_mutex; - mutable std::shared_mutex _node_mutex; - mutable std::shared_mutex _fork_heads_mutex; -}; - -shared_lock_ptr database_impl::get_shared_lock() const -{ - return std::make_shared< std::shared_lock< std::shared_mutex > >( _node_mutex ); -} - -unique_lock_ptr database_impl::get_unique_lock() const -{ - return std::make_shared< std::unique_lock< std::shared_mutex > >( _node_mutex ); -} - -bool database_impl::verify_shared_lock( const shared_lock_ptr& lock ) const -{ - if ( !lock ) - return false; - - if ( !lock->owns_lock() ) - return false; - - return lock->mutex() == &_node_mutex; -} - -bool database_impl::verify_unique_lock( const unique_lock_ptr& lock ) const -{ - if ( !lock ) - return false; - - if ( !lock->owns_lock() ) - return false; - - return lock->mutex() == &_node_mutex; -} - -void database_impl::reset( const unique_lock_ptr& lock ) -{ - // - // This method closes, wipes and re-opens the database. - // - // So the caller needs to be very careful to only call this method if deleting the database is desirable! - // - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - // Wipe and start over from empty database! 
- _root->clear(); - close_lockless(); - open_lockless( _path, _init_func, _comp ); -} - -void database_impl::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - - state_node_comparator_function comp; - - switch ( algo ) - { - case fork_resolution_algorithm::block_time: - comp = &block_time_comparator; - break; - case fork_resolution_algorithm::pob: - comp = &pob_comparator; - break; - case fork_resolution_algorithm::fifo: - [[fallthrough]]; - default: - comp = &fifo_comparator; - } - - open( p, init, comp, lock ); -} - -void database_impl::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - open_lockless( p, init, comp ); -} - -void database_impl::open_lockless( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp ) -{ - auto root = std::make_shared< state_node >(); - root->_impl->_state = std::make_shared< state_delta >( p ); - _init_func = init; - _comp = comp; - - if ( !root->revision() && root->_impl->_state->is_empty() && _init_func ) - { - init( root ); - } - root->_impl->_state->finalize(); - _index.insert( root->_impl->_state ); - _root = root->_impl->_state; - _head = root->_impl->_state; - _fork_heads.insert_or_assign( _head->id(), _head ); - - _path = p; -} - -void database_impl::close( const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > 
index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - close_lockless(); -} - -void database_impl::close_lockless() -{ - _fork_heads.clear(); - _root.reset(); - _head.reset(); - _index.clear(); -} - -state_node_ptr database_impl::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - KOINOS_ASSERT( revision >= _root->revision(), illegal_argument, - "cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", - ("root", _root->revision())("req", revision) ); - - if( revision == _root->revision() ) - { - auto root = get_root_lockless(); - if ( root ) - root->_impl->_lock = lock; - - return root; - } - - auto child = get_node_lockless( child_id ); - if( !child ) - child = get_head_lockless(); - - state_delta_ptr delta = child->_impl->_state; - - while( delta->revision() > revision ) - { - delta = delta->parent(); - } - - auto node_itr = _index.find( delta->id() ); - - KOINOS_ASSERT( node_itr != _index.end(), internal_error, - "could not find state node associated with linked state_delta ${id}", ("id", delta->id() ) ); - - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - node->_impl->_lock = lock; - return node; -} - -state_node_ptr database_impl::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - KOINOS_ASSERT( revision >= _root->revision(), illegal_argument, - 
"cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", - ("root", _root->revision())("req", revision) ); - - if( revision == _root->revision() ) - { - auto root = get_root_lockless(); - - return root; - } - - auto child = get_node_lockless( child_id ); - if( !child ) - child = get_head_lockless(); - - state_delta_ptr delta = child->_impl->_state; - - while( delta->revision() > revision ) - { - delta = delta->parent(); - } - - auto node_itr = _index.find( delta->id() ); - - KOINOS_ASSERT( node_itr != _index.end(), internal_error, - "could not find state node associated with linked state_delta ${id}", ("id", delta->id() ) ); - - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - return node; -} - -state_node_ptr database_impl::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto node = get_node_lockless( node_id ); - if ( node ) - node->_impl->_lock = lock; - - return node; -} - -state_node_ptr database_impl::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto node = get_node_lockless( node_id ); - - return node; -} - -state_node_ptr database_impl::get_node_lockless( const state_node_id& node_id ) const -{ - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - - auto node_itr = _index.find( node_id ); - - if ( node_itr != _index.end() ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - return node; - } - - return state_node_ptr(); -} - -state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, 
const protocol::block_header& header, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" );; - - // Needs to be configurable - auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); - - state_node_ptr parent_state = get_node( parent_id, lock ); - - if ( parent_state ) - { - std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); - - // We need to own the lock - if ( cv_lock.owns_lock() ) - { - // Check if the node is finalized - bool is_finalized = parent_state->is_finalized(); - - // If the node is finalized, try to wait for the node to be finalized - if ( !is_finalized && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) - is_finalized = parent_state->is_finalized(); - - // Finally, if the node is finalized, we can create a new writable node with the desired parent - if ( is_finalized ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); - - std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); - - // Ensure the parent node still exists in the index and then insert the child node - if ( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() && _index.insert( node->_impl->_state ).second ) - { - node->_impl->_lock = lock; - return node; - } - } - } - } - - return state_node_ptr(); -} - -state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" );; - - // Needs to be configurable - auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); - - state_node_ptr parent_state = get_node( parent_id, lock ); - - if ( parent_state ) - { 
- std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); - - // We need to own the lock - if ( cv_lock.owns_lock() ) - { - // Check if the node is finalized - bool is_finalized = parent_state->is_finalized(); - - // If the node is finalized, try to wait for the node to be finalized - if ( !is_finalized && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) - is_finalized = parent_state->is_finalized(); - - // Finally, if the node is finalized, we can create a new writable node with the desired parent - if ( is_finalized ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); - - std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); - - // Ensure the parent node still exists in the index and then insert the child node - if ( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() && _index.insert( node->_impl->_state ).second ) - { - return node; - } - } - } - } - - return state_node_ptr(); -} - -state_node_ptr database_impl::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); - - auto new_node = std::make_shared< state_node >(); - new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); - - if ( _index.insert( new_node->_impl->_state ).second ) - { - new_node->_impl->_lock = lock; - return new_node; - } - - return 
state_node_ptr(); -} - -state_node_ptr database_impl::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); - - auto new_node = std::make_shared< state_node >(); - new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); - - if ( _index.insert( new_node->_impl->_state ).second ) - { - return new_node; - } - - return state_node_ptr(); -} - -void database_impl::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - - { - std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); - - node->_impl->_state->finalize(); - } - - node->_impl->_state->cv().notify_all(); - - if ( node->revision() > _head->revision() ) - { - _head = node->_impl->_state; - } - else if ( node->revision() == _head->revision() ) - { - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - fork_list forks; - forks.reserve( _fork_heads.size() ); - std::transform( - std::begin( _fork_heads ), std::end( _fork_heads ), std::back_inserter( forks ), - []( const auto& entry ) - { - state_node_ptr s = std::make_shared< 
state_node >(); - s->_impl->_state = entry.second; - return s; - } - ); - - auto head = get_head_lockless(); - if ( auto new_head = _comp( forks, head, node ); new_head != nullptr ) - { - _head = new_head->_impl->_state; - } - else - { - _head = head->parent()->_impl->_state; - auto head_itr = _fork_heads.find( head->id() ); - if ( head_itr != std::end( _fork_heads ) ) - _fork_heads.erase( head_itr ); - _fork_heads.insert_or_assign( head->parent()->id(), _head ); - } - } - - // When node is finalized, parent node needs to be removed from heads, if it exists. - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - if ( node->parent_id() != _head->id() ) - { - auto parent_itr = _fork_heads.find( node->parent_id() ); - if ( parent_itr != std::end( _fork_heads ) ) - _fork_heads.erase( parent_itr ); - - _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); - } -} - -void database_impl::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - - { - std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); - - node->_impl->_state->finalize(); - } - - node->_impl->_state->cv().notify_all(); - - if ( node->revision() > _head->revision() ) - { - _head = node->_impl->_state; - } - else if ( node->revision() == _head->revision() ) - { - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - fork_list forks; - forks.reserve( _fork_heads.size() ); - std::transform( - std::begin( _fork_heads ), std::end( _fork_heads ), std::back_inserter( forks ), - []( const auto& entry ) - { - state_node_ptr s = std::make_shared< 
state_node >(); - s->_impl->_state = entry.second; - return s; - } - ); - - auto head = get_head_lockless(); - if ( auto new_head = _comp( forks, head, node ); new_head != nullptr ) - { - _head = new_head->_impl->_state; - } - else - { - _head = head->parent()->_impl->_state; - auto head_itr = _fork_heads.find( head->id() ); - if ( head_itr != std::end( _fork_heads ) ) - _fork_heads.erase( head_itr ); - _fork_heads.insert_or_assign( head->parent()->id(), _head ); - } - } - - // When node is finalized, parent node needs to be removed from heads, if it exists. - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - if ( node->parent_id() != _head->id() ) - { - auto parent_itr = _fork_heads.find( node->parent_id() ); - if ( parent_itr != std::end( _fork_heads ) ) - _fork_heads.erase( parent_itr ); - - _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); - } -} - -void database_impl::discard_node( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - discard_node_lockless( node_id, whitelist ); -} - -void database_impl::discard_node( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - discard_node_lockless( node_id, whitelist ); -} - -void database_impl::discard_node_lockless( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist ) -{ - KOINOS_ASSERT( is_open(), database_not_open, "database is not 
open" ); - auto node = get_node_lockless( node_id ); - - if( !node ) return; - - KOINOS_ASSERT( node_id != _root->id(), illegal_argument, "cannot discard root node" ); - - std::vector< state_node_id > remove_queue{ node_id }; - const auto& previdx = _index.template get< by_parent >(); - const auto head_id = _head->id(); - - for( uint32_t i = 0; i < remove_queue.size(); ++i ) - { - KOINOS_ASSERT( remove_queue[ i ] != head_id, cannot_discard, "cannot discard a node that would result in discarding of head" ); - - auto previtr = previdx.lower_bound( remove_queue[ i ] ); - while ( previtr != previdx.end() && (*previtr)->parent_id() == remove_queue[ i ] ) - { - // Do not remove nodes on the whitelist - if ( whitelist.find( (*previtr)->id() ) == whitelist.end() ) - { - remove_queue.push_back( (*previtr)->id() ); - } - - ++previtr; - } - - // We may discard one or more fork heads when discarding a minority fork tree - // For completeness, we'll check every node to see if it is a fork head - auto head_itr = _fork_heads.find( remove_queue[ i ] ); - if ( head_itr != _fork_heads.end() ) - { - _fork_heads.erase( head_itr ); - } - } - - for( const auto& id : remove_queue ) - { - auto itr = _index.find( id ); - if ( itr != _index.end() ) - _index.erase( itr ); - } - - // When node is discarded, if the parent node is not a parent of other nodes (no forks), add it to heads. 
- auto fork_itr = previdx.find( node->parent_id() ); - if ( fork_itr == previdx.end() ) - { - auto parent_itr = _index.find( node->parent_id() ); - KOINOS_ASSERT( parent_itr != _index.end(), internal_error, "discarded parent node not found in node index" ); - _fork_heads.insert_or_assign( (*parent_itr)->id(), *parent_itr ); - } -} - -void database_impl::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" );; - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - - // If the node_id to commit is the root id, return. It is already committed. - if ( node_id == _root->id() ) - return; - - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found", ("n", node_id) ); - - auto old_root = _root; - _root = node->_impl->_state; - - _index.modify( _index.find( node_id ), []( state_delta_ptr& n ){ n->commit(); } ); - - std::unordered_set< state_node_id > whitelist{ node_id }; - discard_node_lockless( old_root->id(), whitelist ); -} - -state_node_ptr database_impl::get_head( const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto head = get_head_lockless(); - if ( head ) - head->_impl->_lock = lock; - - return head; -} - -state_node_ptr database_impl::get_head( const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto head = get_head_lockless(); - - return head; -} - -state_node_ptr database_impl::get_head_lockless() const -{ - KOINOS_ASSERT( 
is_open(), database_not_open, "database is not open" ); - auto head = std::make_shared< state_node >(); - head->_impl->_state = _head; - return head; -} - -std::vector< state_node_ptr > database_impl::get_fork_heads( const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > fork_heads; - fork_heads.reserve( _fork_heads.size() ); - - for( auto& head : _fork_heads ) - { - auto fork_head = std::make_shared< state_node >(); - fork_head->_impl->_state = head.second; - fork_head->_impl->_lock = lock; - fork_heads.push_back( fork_head ); - } - - return fork_heads; -} - -std::vector< state_node_ptr > database_impl::get_fork_heads( const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > fork_heads; - fork_heads.reserve( _fork_heads.size() ); - - for( auto& head : _fork_heads ) - { - auto fork_head = std::make_shared< state_node >(); - fork_head->_impl->_state = head.second; - fork_heads.push_back( fork_head ); - } - - return fork_heads; -} - -std::vector< state_node_ptr > database_impl::get_all_nodes( const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > nodes; - 
nodes.reserve( _index.size() ); - - for ( const auto& delta : _index ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = delta; - node->_impl->_lock = lock; - nodes.push_back( node ); - } - - return nodes; -} - -std::vector< state_node_ptr > database_impl::get_all_nodes( const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > nodes; - nodes.reserve( _index.size() ); - - for ( const auto& delta : _index ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = delta; - nodes.push_back( node ); - } - - return nodes; -} - -state_node_ptr database_impl::get_root( const shared_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto root = get_root_lockless(); - if ( root ) - root->_impl->_lock = lock; - - return root; -} - -state_node_ptr database_impl::get_root( const unique_lock_ptr& lock ) const -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - - auto root = get_root_lockless(); - - return root; -} - -state_node_ptr database_impl::get_root_lockless() const -{ - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto root = std::make_shared< state_node >(); - root->_impl->_state = _root; - return root; -} - -bool database_impl::is_open() const -{ - return (bool)_root && (bool)_head; -} - -const object_value* state_node_impl::get_object( const object_space& space, const object_key& key ) const -{ - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto 
key_string = util::converter::as< std::string >( db_key ); - - auto pobj = merge_state( _state ).find( key_string ); - - if( pobj != nullptr ) - { - return pobj; - } - - return nullptr; -} - -std::pair< const object_value*, const object_key > state_node_impl::get_next_object( const object_space& space, const object_key& key ) const -{ - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); - - auto state = merge_state( _state ); - auto it = state.lower_bound( key_string ); - - if ( it != state.end() && it.key() == key_string ) - { - it++; - } - - if( it != state.end() ) - { - chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); - - if ( next_key.space() == space ) - { - return { &*it, next_key.key() }; - } - } - - return { nullptr, null_key }; -} - -std::pair< const object_value*, const object_key > state_node_impl::get_prev_object( const object_space& space, const object_key& key ) const -{ - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); - - auto state = merge_state( _state ); - auto it = state.lower_bound( key_string ); - - if( it != state.begin() ) - { - --it; - chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); - - if ( next_key.space() == space ) - { - return { &*it, next_key.key() }; - } - } - - return { nullptr, null_key }; -} - -int64_t state_node_impl::put_object( const object_space& space, const object_key& key, const object_value* val ) -{ - KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); - - int64_t bytes_used = 0; - auto pobj = merge_state( _state ).find( key_string ); - - if ( 
pobj != nullptr ) - bytes_used -= pobj->size(); - else - bytes_used += key_string.size(); - - bytes_used += val->size(); - _state->put( key_string, *val ); - - return bytes_used; -} - -int64_t state_node_impl::remove_object( const object_space& space, const object_key& key ) -{ - KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); - - int64_t bytes_used = 0; - auto pobj = merge_state( _state ).find( key_string ); - - if ( pobj != nullptr ) - { - bytes_used -= pobj->size(); - bytes_used -= key_string.size(); - } - - _state->erase( key_string ); - - return bytes_used; -} - -crypto::multihash state_node_impl::merkle_root() const -{ - return _state->merkle_root(); -} - -std::vector< protocol::state_delta_entry > state_node_impl::get_delta_entries() const -{ - return _state->get_delta_entries(); -} - -} // detail - -abstract_state_node::abstract_state_node() : _impl( new detail::state_node_impl() ) {} -abstract_state_node::~abstract_state_node() {} - -const object_value* abstract_state_node::get_object( const object_space& space, const object_key& key ) const -{ - return _impl->get_object( space, key ); -} - -std::pair< const object_value*, const object_key > abstract_state_node::get_next_object( const object_space& space, const object_key& key ) const -{ - return _impl->get_next_object( space, key ); -} - -std::pair< const object_value*, const object_key > abstract_state_node::get_prev_object( const object_space& space, const object_key& key ) const -{ - return _impl->get_prev_object( space, key ); -} - -int64_t abstract_state_node::put_object( const object_space& space, const object_key& key, const object_value* val ) -{ - return _impl->put_object( space, key, val ); -} - -int64_t abstract_state_node::remove_object( const object_space& space, const object_key& key ) -{ - 
return _impl->remove_object( space, key ); -} - -bool abstract_state_node::is_finalized() const -{ - return _impl->_state->is_finalized(); -} - -crypto::multihash abstract_state_node::merkle_root() const -{ - KOINOS_ASSERT( is_finalized(), koinos::exception, "node must be finalized to calculate merkle root" ); - return _impl->merkle_root(); -} - -std::vector< protocol::state_delta_entry > abstract_state_node::get_delta_entries() const -{ - return _impl->get_delta_entries(); -} - -anonymous_state_node_ptr abstract_state_node::create_anonymous_node() -{ - auto anonymous_node = std::make_shared< anonymous_state_node >(); - anonymous_node->_parent = shared_from_derived(); - anonymous_node->_impl->_state = _impl->_state->make_child(); - anonymous_node->_impl->_lock = _impl->_lock; - return anonymous_node; -} - -state_node::state_node() : abstract_state_node() {} -state_node::~state_node() {} - -const state_node_id& state_node::id() const -{ - return _impl->_state->id(); -} - -const state_node_id& state_node::parent_id() const -{ - return _impl->_state->parent_id(); -} - -uint64_t state_node::revision() const -{ - return _impl->_state->revision(); -} - -abstract_state_node_ptr state_node::parent() const -{ - auto parent_delta = _impl->_state->parent(); - if ( parent_delta ) - { - auto parent_node = std::make_shared< state_node >(); - parent_node->_impl->_state = parent_delta; - parent_node->_impl->_lock = _impl->_lock; - return parent_node; - } - - return abstract_state_node_ptr(); -} - -const protocol::block_header& state_node::block_header() const -{ - return _impl->_state->block_header(); -} - -abstract_state_node_ptr state_node::shared_from_derived() -{ - return shared_from_this(); -} - -anonymous_state_node::anonymous_state_node() : abstract_state_node() {} -anonymous_state_node::anonymous_state_node::~anonymous_state_node() {} - -const state_node_id& anonymous_state_node::id() const -{ - return _parent->id(); -} - -const state_node_id& 
anonymous_state_node::parent_id() const -{ - return _parent->parent_id(); -} - -uint64_t anonymous_state_node::revision() const -{ - return _parent->revision(); -} - -abstract_state_node_ptr anonymous_state_node::parent() const -{ - return _parent; -} - -const protocol::block_header& anonymous_state_node::block_header() const -{ - return _parent->block_header(); -} - -void anonymous_state_node::commit() -{ - KOINOS_ASSERT( !_parent->is_finalized(), node_finalized, "cannot commit to a finalized node" ); - _impl->_state->squash(); - reset(); -} - -void anonymous_state_node::reset() -{ - _impl->_state = _impl->_state->make_child(); -} - -abstract_state_node_ptr anonymous_state_node::shared_from_derived() -{ - return shared_from_this(); -} - - -state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ) -{ - return current_head; -} - -state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) -{ - return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? new_block : head_block; -} - -state_node_ptr pob_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) -{ - if ( head_block->block_header().signer() != new_block->block_header().signer() ) - return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? 
new_block : head_block; - - auto it = std::find_if( std::begin( forks ), std::end( forks ), [&]( state_node_ptr p ) { return p->id() == head_block->id(); } ); - if ( it != std::end( forks ) ) - forks.erase( it ); - - struct { - bool operator()( abstract_state_node_ptr a, abstract_state_node_ptr b ) const - { - if ( a->revision() > b->revision() ) - return true; - else if ( a->revision() < b->revision() ) - return false; - - if ( a->block_header().timestamp() < b->block_header().timestamp() ) - return true; - else if ( a->block_header().timestamp() > b->block_header().timestamp() ) - return false; - - if ( a->id() < b->id() ) - return true; - - return false; - } - } priority_algorithm; - - if ( std::size( forks ) ) - { - std::sort( std::begin( forks ), std::end( forks ), priority_algorithm ); - it = std::begin( forks ); - return priority_algorithm( head_block->parent(), *it ) ? state_node_ptr() : *it; - } - - return state_node_ptr(); -} - -database::database() : impl( new detail::database_impl() ) {} -database::~database() {} - -shared_lock_ptr database::get_shared_lock() const -{ - return impl->get_shared_lock(); -} - -unique_lock_ptr database::get_unique_lock() const -{ - return impl->get_unique_lock(); -} - -void database::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ) -{ - impl->open( p, init, algo, lock ? lock : get_unique_lock() ); -} - -void database::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ) -{ - impl->open( p, init, comp, lock ? lock : get_unique_lock() ); -} - -void database::close( const unique_lock_ptr& lock ) -{ - impl->close( lock ? lock : get_unique_lock() ); -} - -void database::reset( const unique_lock_ptr& lock ) -{ - impl->reset( lock ? 
lock : get_unique_lock() ); -} - -state_node_ptr database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const -{ - return impl->get_node_at_revision( revision, child_id, lock ); -} - -state_node_ptr database::get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const -{ - static const state_node_id null_id; - return impl->get_node_at_revision( revision, null_id, lock ); -} - -state_node_ptr database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const -{ - return impl->get_node_at_revision( revision, child_id, lock ); -} - -state_node_ptr database::get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const -{ - static const state_node_id null_id; - return impl->get_node_at_revision( revision, null_id, lock ); -} - -state_node_ptr database::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const -{ - return impl->get_node( node_id, lock ); -} - -state_node_ptr database::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const -{ - return impl->get_node( node_id, lock ); -} - -state_node_ptr database::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) -{ - return impl->create_writable_node( parent_id, new_id, header, lock ); -} - -state_node_ptr database::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) -{ - return impl->create_writable_node( parent_id, new_id, header, lock ); -} - -state_node_ptr database::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) -{ - return impl->clone_node( node_id, new_id, header, lock ); -} - -state_node_ptr database::clone_node( const state_node_id& node_id, const 
state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) -{ - return impl->clone_node( node_id, new_id, header, lock ); -} - -void database::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) -{ - impl->finalize_node( node_id, lock ); -} - -void database::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - impl->finalize_node( node_id, lock ); -} - -void database::discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ) -{ - static const std::unordered_set< state_node_id > whitelist; - impl->discard_node( node_id, whitelist, lock ); -} - -void database::discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - static const std::unordered_set< state_node_id > whitelist; - impl->discard_node( node_id, whitelist, lock ); -} - -void database::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - impl->commit_node( node_id, lock ? lock : get_unique_lock() ); -} - -state_node_ptr database::get_head( const shared_lock_ptr& lock ) const -{ - return impl->get_head( lock ); -} - -state_node_ptr database::get_head( const unique_lock_ptr& lock ) const -{ - return impl->get_head( lock ); -} - -std::vector< state_node_ptr > database::get_fork_heads( const shared_lock_ptr& lock ) const -{ - return impl->get_fork_heads( lock ); -} - -std::vector< state_node_ptr > database::get_fork_heads( const unique_lock_ptr& lock ) const -{ - return impl->get_fork_heads( lock ); -} - -std::vector< state_node_ptr > database::get_all_nodes( const shared_lock_ptr& lock ) const -{ - return impl->get_all_nodes( lock ); -} - -std::vector< state_node_ptr > database::get_all_nodes( const unique_lock_ptr& lock ) const -{ - return impl->get_all_nodes( lock ); -} - -state_node_ptr database::get_root( const shared_lock_ptr& lock ) const -{ - return impl->get_root( lock ); -} - -state_node_ptr database::get_root( const unique_lock_ptr& lock ) const -{ - return 
impl->get_root( lock ); -} - -} // koinos::state_db diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..f994468 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,53 @@ +add_library(state_db + koinos/state_db/state_db.cpp + koinos/state_db/state_delta.cpp + koinos/state_db/merge_iterator.cpp + koinos/state_db/backends/backend.cpp + koinos/state_db/backends/iterator.cpp + koinos/state_db/backends/map/map_backend.cpp + koinos/state_db/backends/map/map_iterator.cpp + koinos/state_db/backends/rocksdb/rocksdb_backend.cpp + koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp + koinos/state_db/backends/rocksdb/object_cache.cpp + + koinos/state_db/merge_iterator.hpp + koinos/state_db/state_delta.hpp + + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db_types.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/exceptions.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/iterator.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/types.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/map/map_backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/map/map_iterator.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/exceptions.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/object_cache.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp) + +target_link_libraries( + state_db + PUBLIC + Koinos::exception + Koinos::proto + Koinos::crypto + RocksDB::rocksdb) + +koinos_add_format(TARGET state_db) + +target_include_directories( + state_db + PUBLIC + $ + $ + PRIVATE + $) + +koinos_install(TARGETS state_db) + +install( + DIRECTORY + ${PROJECT_SOURCE_DIR}/include + DESTINATION + ${CMAKE_INSTALL_PREFIX}) 
diff --git a/libraries/state_db/backends/backend.cpp b/src/koinos/state_db/backends/backend.cpp similarity index 68% rename from libraries/state_db/backends/backend.cpp rename to src/koinos/state_db/backends/backend.cpp index 7c31839..fdd91ac 100644 --- a/libraries/state_db/backends/backend.cpp +++ b/src/koinos/state_db/backends/backend.cpp @@ -2,53 +2,53 @@ namespace koinos::state_db::backends { -abstract_backend::abstract_backend() : - _id( crypto::multihash::zero( crypto::multicodec::sha2_256 ) ) +abstract_backend::abstract_backend(): + _id( crypto::multihash::zero( crypto::multicodec::sha2_256 ) ) {} bool abstract_backend::empty() const { - return size() == 0; + return size() == 0; } abstract_backend::size_type abstract_backend::revision() const { - return _revision; + return _revision; } void abstract_backend::set_revision( abstract_backend::size_type revision ) { - _revision = revision; + _revision = revision; } const crypto::multihash& abstract_backend::id() const { - return _id; + return _id; } void abstract_backend::set_id( const crypto::multihash& id ) { - _id = id; + _id = id; } const crypto::multihash& abstract_backend::merkle_root() const { - return _merkle_root; + return _merkle_root; } void abstract_backend::set_merkle_root( const crypto::multihash& merkle_root ) { - _merkle_root = merkle_root; + _merkle_root = merkle_root; } const protocol::block_header& abstract_backend::block_header() const { - return _header; + return _header; } void abstract_backend::set_block_header( const protocol::block_header& header ) { - _header = header; + _header = header; } -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/src/koinos/state_db/backends/iterator.cpp b/src/koinos/state_db/backends/iterator.cpp new file mode 100644 index 0000000..60e9ede --- /dev/null +++ b/src/koinos/state_db/backends/iterator.cpp @@ -0,0 +1,66 @@ +#include +#include + +namespace koinos::state_db::backends { + +iterator::iterator( std::unique_ptr< 
abstract_iterator > itr ): + _itr( std::move( itr ) ) +{} + +iterator::iterator( const iterator& other ): + _itr( other._itr->copy() ) +{} + +iterator::iterator( iterator&& other ): + _itr( std::move( other._itr ) ) +{} + +const iterator::value_type& iterator::operator*() const +{ + return **_itr; +} + +const iterator::key_type& iterator::key() const +{ + return _itr->key(); +} + +iterator& iterator::operator++() +{ + ++( *_itr ); + return *this; +} + +iterator& iterator::operator--() +{ + --( *_itr ); + return *this; +} + +iterator& iterator::operator=( iterator&& other ) +{ + _itr = std::move( other._itr ); + return *this; +} + +bool iterator::valid() const +{ + return _itr && _itr->valid(); +} + +bool operator==( const iterator& x, const iterator& y ) +{ + if( x.valid() && y.valid() ) + { + return *x == *y; + } + + return x.valid() == y.valid(); +} + +bool operator!=( const iterator& x, const iterator& y ) +{ + return !( x == y ); +} + +} // namespace koinos::state_db::backends diff --git a/src/koinos/state_db/backends/map/map_backend.cpp b/src/koinos/state_db/backends/map/map_backend.cpp new file mode 100644 index 0000000..580f470 --- /dev/null +++ b/src/koinos/state_db/backends/map/map_backend.cpp @@ -0,0 +1,76 @@ +#include + +namespace koinos::state_db::backends::map { + +map_backend::map_backend() {} + +map_backend::~map_backend() {} + +iterator map_backend::begin() noexcept +{ + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.begin() ), _map ) ); +} + +iterator map_backend::end() noexcept +{ + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.end() ), _map ) ); +} + +void map_backend::put( const key_type& k, const value_type& v ) +{ + _map.insert_or_assign( k, v ); +} + +const map_backend::value_type* map_backend::get( const key_type& key ) const +{ + auto itr = _map.find( key ); + if( itr == _map.end() ) + { + return nullptr; + } + + return 
&itr->second; +} + +void map_backend::erase( const key_type& k ) +{ + _map.erase( k ); +} + +void map_backend::clear() noexcept +{ + _map.clear(); +} + +map_backend::size_type map_backend::size() const noexcept +{ + return _map.size(); +} + +iterator map_backend::find( const key_type& k ) +{ + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.find( k ) ), _map ) ); +} + +iterator map_backend::lower_bound( const key_type& k ) +{ + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.lower_bound( k ) ), + _map ) ); +} + +void map_backend::start_write_batch() {} + +void map_backend::end_write_batch() {} + +void map_backend::store_metadata() {} + +std::shared_ptr< abstract_backend > map_backend::clone() const +{ + return std::make_shared< map_backend >( *this ); +} + +} // namespace koinos::state_db::backends::map diff --git a/src/koinos/state_db/backends/map/map_iterator.cpp b/src/koinos/state_db/backends/map/map_iterator.cpp new file mode 100644 index 0000000..5b84960 --- /dev/null +++ b/src/koinos/state_db/backends/map/map_iterator.cpp @@ -0,0 +1,53 @@ +#include + +#include + +namespace koinos::state_db::backends::map { + +map_iterator::map_iterator( std::unique_ptr< std::map< detail::key_type, detail::value_type >::iterator > itr, + const std::map< detail::key_type, detail::value_type >& map ): + _itr( std::move( itr ) ), + _map( map ) +{} + +map_iterator::~map_iterator() {} + +const map_iterator::value_type& map_iterator::operator*() const +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + return ( *_itr )->second; +} + +const map_iterator::key_type& map_iterator::key() const +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + return ( *_itr )->first; +} + +abstract_iterator& map_iterator::operator++() +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + 
++( *_itr ); + return *this; +} + +abstract_iterator& map_iterator::operator--() +{ + KOINOS_ASSERT( *_itr != _map.begin(), iterator_exception, "iterator operation is invalid" ); + --( *_itr ); + return *this; +} + +bool map_iterator::valid() const +{ + return _itr && *_itr != _map.end(); +} + +std::unique_ptr< abstract_iterator > map_iterator::copy() const +{ + return std::make_unique< map_iterator >( + std::make_unique< std::map< detail::key_type, detail::value_type >::iterator >( *_itr ), + _map ); +} + +} // namespace koinos::state_db::backends::map diff --git a/src/koinos/state_db/backends/rocksdb/object_cache.cpp b/src/koinos/state_db/backends/rocksdb/object_cache.cpp new file mode 100644 index 0000000..3573654 --- /dev/null +++ b/src/koinos/state_db/backends/rocksdb/object_cache.cpp @@ -0,0 +1,76 @@ +#include + +#include + +namespace koinos::state_db::backends::rocksdb { + +object_cache::object_cache( std::size_t size ): + _cache_max_size( size ) +{} + +object_cache::~object_cache() {} + +std::pair< bool, std::shared_ptr< const object_cache::value_type > > object_cache::get( const key_type& k ) +{ + auto itr = _object_map.find( k ); + if( itr == _object_map.end() ) + return std::make_pair( false, std::shared_ptr< const object_cache::value_type >() ); + + // Erase the entry from the list and push front + _lru_list.erase( itr->second.second ); + _lru_list.push_front( k ); + auto val = itr->second.first; + + _object_map[ k ] = std::make_pair( val, _lru_list.begin() ); + + assert( _object_map.size() == _lru_list.size() ); + + return std::make_pair( true, val ); +} + +std::shared_ptr< const object_cache::value_type > +object_cache::put( const key_type& k, std::shared_ptr< const object_cache::value_type > v ) +{ + remove( k ); + + // Min 1 byte for key and 1 byte for value + auto entry_size = std::max( k.size() + ( v ? 
v->size() : 0 ), std::size_t( 2 ) ); + + // If the cache is full, remove the last entry from the map and pop back + while( _cache_size + entry_size > _cache_max_size ) + remove( _lru_list.back() ); + + _lru_list.push_front( k ); + _object_map[ k ] = std::make_pair( v, _lru_list.begin() ); + _cache_size += entry_size; + + assert( _object_map.size() == _lru_list.size() ); + + return v; +} + +void object_cache::remove( const key_type& k ) +{ + auto itr = _object_map.find( k ); + if( itr != _object_map.end() ) + { + _cache_size -= std::max( k.size() + ( itr->second.first ? itr->second.first->size() : 0 ), std::size_t( 2 ) ); + _lru_list.erase( itr->second.second ); + _object_map.erase( itr ); + } + + assert( _object_map.size() == _lru_list.size() ); +} + +void object_cache::clear() +{ + _object_map.clear(); + _lru_list.clear(); +} + +std::mutex& object_cache::get_mutex() +{ + return _mutex; +} + +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp new file mode 100644 index 0000000..1c2baf8 --- /dev/null +++ b/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp @@ -0,0 +1,515 @@ +#include + +#include +#include +#include +#include + +#include +#include + +namespace koinos::state_db::backends::rocksdb { + +namespace constants { +constexpr std::size_t cache_size = 64 << 20; // 64 MB +constexpr std::size_t max_open_files = 64; + +constexpr std::size_t default_column_index = 0; +const std::string objects_column_name = "objects"; +constexpr std::size_t objects_column_index = 1; +const std::string metadata_column_name = "metadata"; +constexpr std::size_t metadata_column_index = 2; + +const std::string size_key = "size"; +const std::string revision_key = "revision"; +const std::string id_key = "id"; +const std::string merkle_root_key = "merkle_root"; +const std::string block_header_key = "block_header"; + +constexpr rocksdb_backend::size_type 
size_default = 0; +constexpr rocksdb_backend::size_type revision_default = 0; +const crypto::multihash id_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); +const crypto::multihash merkle_root_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); +const protocol::block_header block_header_default = protocol::block_header(); +} // namespace constants + +bool setup_database( const std::filesystem::path& p ) +{ + std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; + defs.emplace_back( constants::objects_column_name, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::metadata_column_name, ::rocksdb::ColumnFamilyOptions() ); + + ::rocksdb::Options options; + options.create_if_missing = true; + + ::rocksdb::DB* db; + auto status = ::rocksdb::DB::Open( options, p.string(), &db ); + + KOINOS_ASSERT( status.ok(), + rocksdb_open_exception, + "unable to open rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + auto db_ptr = std::shared_ptr< ::rocksdb::DB >( db ); + + std::vector< ::rocksdb::ColumnFamilyHandle* > handles; + status = db->CreateColumnFamilies( defs, &handles ); + + if( !status.ok() ) + { + return false; + } + + std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > > handle_ptrs; + + for( auto* h: handles ) + handle_ptrs.emplace_back( h ); + + ::rocksdb::WriteOptions wopts; + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::size_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::size_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::revision_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::revision_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + 
&*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::id_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::id_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::merkle_root_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::merkle_root_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::block_header_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::block_header_default ) ) ); + + handle_ptrs.clear(); + db_ptr.reset(); + + return status.ok(); +} + +rocksdb_backend::rocksdb_backend(): + _cache( std::make_shared< object_cache >( constants::cache_size ) ), + _ropts( std::make_shared< ::rocksdb::ReadOptions >() ) +{} + +rocksdb_backend::~rocksdb_backend() +{ + close(); +} + +void rocksdb_backend::open( const std::filesystem::path& p ) +{ + KOINOS_ASSERT( p.is_absolute(), rocksdb_open_exception, "path must be absolute, ${p}", ( "p", p.string() ) ); + KOINOS_ASSERT( std::filesystem::exists( p ), + rocksdb_open_exception, + "path does not exist, ${p}", + ( "p", p.string() ) ); + + std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; + defs.emplace_back( ::rocksdb::kDefaultColumnFamilyName, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::objects_column_name, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::metadata_column_name, ::rocksdb::ColumnFamilyOptions() ); + + std::vector< ::rocksdb::ColumnFamilyHandle* > handles; + + ::rocksdb::Options options; + options.max_open_files = constants::max_open_files; + ::rocksdb::DB* db; + + auto status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); + + if( !status.ok() ) + { + KOINOS_ASSERT( setup_database( p ), rocksdb_setup_exception, 
"unable to configure rocksdb database" ); + + status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); + KOINOS_ASSERT( status.ok(), + rocksdb_open_exception, + "unable to open rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + } + + _db = std::shared_ptr< ::rocksdb::DB >( db ); + + for( auto* h: handles ) + _handles.emplace_back( h ); + + try + { + load_metadata(); + } + catch( ... ) + { + _handles.clear(); + _db.reset(); + throw; + } +} + +void rocksdb_backend::close() +{ + if( _db ) + { + store_metadata(); + flush(); + + ::rocksdb::CancelAllBackgroundWork( &*_db, true ); + _handles.clear(); + _db.reset(); + std::lock_guard lock( _cache->get_mutex() ); + _cache->clear(); + } +} + +void rocksdb_backend::flush() +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + static const ::rocksdb::FlushOptions flush_options; + + _db->Flush( flush_options, &*_handles[ constants::objects_column_index ] ); + _db->Flush( flush_options, &*_handles[ constants::metadata_column_index ] ); +} + +void rocksdb_backend::start_write_batch() +{ + KOINOS_ASSERT( !_write_batch, rocksdb_session_in_progress, "session already in progress" ); + _write_batch.emplace(); +} + +void rocksdb_backend::end_write_batch() +{ + if( _write_batch ) + { + auto status = _db->Write( _wopts, &*_write_batch ); + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write session to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + _write_batch.reset(); + } +} + +iterator rocksdb_backend::begin() +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + itr->_iter->SeekToFirst(); + + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); +} + +iterator rocksdb_backend::end() +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); +} + +void rocksdb_backend::put( const key_type& k, const value_type& v ) +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + bool exists = get( k ); + + ::rocksdb::Status status; + + if( _write_batch ) + { + status = + _write_batch->Put( &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), ::rocksdb::Slice( v ) ); + } + else + { + status = + _db->Put( _wopts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), ::rocksdb::Slice( v ) ); + } + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + if( !exists ) + { + _size++; + } + + std::lock_guard lock( _cache->get_mutex() ); + _cache->put( k, std::make_shared< const object_cache::value_type >( v ) ); +} + +const rocksdb_backend::value_type* rocksdb_backend::get( const key_type& k ) const +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + std::lock_guard lock( _cache->get_mutex() ); + auto [ cache_hit, ptr ] = _cache->get( k ); + if( cache_hit ) + { + if( ptr ) + return &*ptr; + + return nullptr; + } + + value_type value; + auto status = _db->Get( *_ropts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), &value ); + + if( status.ok() ) + return &*_cache->put( k, std::make_shared< const object_cache::value_type >( value ) ); + else if( status.IsNotFound() ) + _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); + + return nullptr; +} + +void rocksdb_backend::erase( const key_type& k ) +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + bool exists = get( k ); + auto status = _db->Delete( _wopts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + if( exists ) + { + _size--; + } + + std::lock_guard lock( _cache->get_mutex() ); + _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); +} + +void rocksdb_backend::clear() +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + for( auto h: _handles ) + { + _db->DropColumnFamily( &*h ); + } + + _handles.clear(); + _db.reset(); + std::lock_guard lock( _cache->get_mutex() ); + _cache->clear(); +} + +rocksdb_backend::size_type rocksdb_backend::size() const +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + return _size; +} + +iterator rocksdb_backend::find( const key_type& k ) +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + auto itr_ptr = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + + itr_ptr->Seek( ::rocksdb::Slice( k ) ); + + if( itr_ptr->Valid() ) + { + auto key_slice = itr_ptr->key(); + + if( k.size() == key_slice.size() && memcmp( k.data(), key_slice.data(), k.size() ) == 0 ) + { + itr->_iter = std::move( itr_ptr ); + } + } + + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); +} + +iterator rocksdb_backend::lower_bound( const key_type& k ) +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + + itr->_iter->Seek( ::rocksdb::Slice( k ) ); + + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); +} + +void rocksdb_backend::load_metadata() +{ + KOINOS_ASSERT( 
_db, rocksdb_database_not_open_exception, "database not open" ); + + std::string value; + auto status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::size_key ), + &value ); + + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + _size = util::converter::to< size_type >( value ); + + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::revision_key ), + &value ); + + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + set_revision( util::converter::to< size_type >( value ) ); + + status = + _db->Get( *_ropts, &*_handles[ constants::metadata_column_index ], ::rocksdb::Slice( constants::id_key ), &value ); + + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + set_id( util::converter::to< crypto::multihash >( value ) ); + + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::merkle_root_key ), + &value ); + + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + set_merkle_root( util::converter::to< crypto::multihash >( value ) ); + + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::block_header_key ), + &value ); + + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + set_block_header( util::converter::to< protocol::block_header >( value ) ); +} + +void rocksdb_backend::store_metadata() +{ + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::size_key ), + ::rocksdb::Slice( util::converter::as< std::string >( _size ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::revision_key ), + ::rocksdb::Slice( util::converter::as< std::string >( revision() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::id_key ), + ::rocksdb::Slice( util::converter::as< std::string >( id() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::merkle_root_key ), + ::rocksdb::Slice( util::converter::as< std::string >( merkle_root() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::block_header_key ), + ::rocksdb::Slice( util::converter::as< std::string >( block_header() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); +} + +std::shared_ptr< abstract_backend > rocksdb_backend::clone() const +{ + KOINOS_THROW( internal_exception, "rocksdb_backend, 'clone' not implemented" ); +} + +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp new file mode 100644 index 0000000..bb6412e --- /dev/null +++ b/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp @@ -0,0 +1,130 @@ +#include + +#include + +namespace koinos::state_db::backends::rocksdb { + +rocksdb_iterator::rocksdb_iterator( std::shared_ptr< ::rocksdb::DB > db, + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, + std::shared_ptr< const ::rocksdb::ReadOptions > opts, + std::shared_ptr< object_cache > cache ): + _db( db ), + _handle( handle ), + _opts( opts ), + _cache( cache ) +{} + +rocksdb_iterator::rocksdb_iterator( const rocksdb_iterator& other ): + _db( other._db ), + _handle( other._handle ), + _opts( other._opts ), + _cache( other._cache ), + _cache_value( other._cache_value ) +{ + if( other._iter ) + { + _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); + + if( other._iter->Valid() ) + { + _iter->Seek( other._iter->key() ); + } + } +} + +rocksdb_iterator::~rocksdb_iterator() {} + +const rocksdb_iterator::value_type& rocksdb_iterator::operator*() const +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + + if( !_cache_value ) + { + update_cache_value(); + } + + return *_cache_value; +} + +const rocksdb_iterator::key_type& 
rocksdb_iterator::key() const +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + + if( !_key ) + { + update_cache_value(); + } + + return *_key; +} + +abstract_iterator& rocksdb_iterator::operator++() +{ + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + + _iter->Next(); + KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); + + update_cache_value(); + + return *this; +} + +abstract_iterator& rocksdb_iterator::operator--() +{ + if( !valid() ) + { + _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); + _iter->SeekToLast(); + } + else + { + _iter->Prev(); + KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); + } + + update_cache_value(); + + return *this; +} + +bool rocksdb_iterator::valid() const +{ + return _iter && _iter->Valid(); +} + +std::unique_ptr< abstract_iterator > rocksdb_iterator::copy() const +{ + return std::make_unique< rocksdb_iterator >( *this ); +} + +void rocksdb_iterator::update_cache_value() const +{ + if( valid() ) + { + auto key_slice = _iter->key(); + auto key = std::make_shared< std::string >( key_slice.data(), key_slice.size() ); + std::lock_guard< std::mutex > lock( _cache->get_mutex() ); + auto [ cache_hit, ptr ] = _cache->get( *key ); + + if( cache_hit ) + KOINOS_ASSERT( ptr, rocksdb_internal_exception, "iterator erroneously hit null value in cache" ); + + if( !ptr ) + { + auto value_slice = _iter->value(); + ptr = _cache->put( *key, + std::make_shared< const object_cache::value_type >( value_slice.data(), value_slice.size() ) ); + } + + _cache_value = ptr; + _key = key; + } + else + { + _cache_value.reset(); + _key.reset(); + } +} + +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/merge_iterator.cpp b/src/koinos/state_db/merge_iterator.cpp new file mode 100644 index 0000000..8b7e734 --- /dev/null +++ b/src/koinos/state_db/merge_iterator.cpp @@ -0,0 +1,311 
+ // If both iterators are empty, they are equal. + // But we use empty merge iterators as an optimization for an end iterator. + // So if one is empty, and the other is all end iterators, they are also equal.
// If there was no valid key, then bring back each iterator once, it is guaranteed to be less than the + // current value (end()).
// The key at this point is guaranteed to be less than the head key (or at begin() and greater), but it
// All values in these indices one past are guaranteed to be greater than the new least, or invalid by
+{ + return _head->find( key ); +} + +merge_iterator merge_state::lower_bound( const key_type& key ) const +{ + return merge_iterator( _head, + [ & ]( std::shared_ptr< backends::abstract_backend > backend ) + { + return backend->lower_bound( key ); + } ); +} + +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/merge_iterator.hpp b/src/koinos/state_db/merge_iterator.hpp new file mode 100644 index 0000000..28d44ee --- /dev/null +++ b/src/koinos/state_db/merge_iterator.hpp @@ -0,0 +1,154 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include + +#include + +namespace koinos::state_db::detail { + +using namespace boost::multi_index; + +struct iterator_wrapper +{ + iterator_wrapper( backends::iterator&& i, uint64_t r, std::shared_ptr< backends::abstract_backend > b ); + iterator_wrapper( iterator_wrapper&& i ); + iterator_wrapper( const iterator_wrapper& i ); + + const iterator_wrapper& self() const; + bool valid() const; + + backends::iterator itr; + std::shared_ptr< backends::abstract_backend > backend; + uint64_t revision; +}; + +// Uses revision as a tiebreaker only for when both iterators are invalid +// to enforce a total ordering on this comparator. The composite key on +// revision is still needed for the case when iterators are valid and equal. +// (i.e. 
lhs < rhs == false && rhs < lhs == false ) +struct iterator_compare_less +{ + bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; +}; + +struct iterator_compare_greater +{ + bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; +}; + +class merge_iterator: public boost::bidirectional_iterator_helper< merge_iterator, + typename state_delta::value_type, + std::size_t, + const typename state_delta::value_type*, + const typename state_delta::value_type& > +{ +public: + using key_type = state_delta::key_type; + using value_type = state_delta::value_type; + +private: + using iterator_type = backends::iterator; + using state_delta_ptr = std::shared_ptr< state_delta >; + + struct by_order_revision; + struct by_reverse_order_revision; + struct by_revision; + + using iter_revision_index_type = multi_index_container< + iterator_wrapper, + indexed_by< + ordered_unique< + tag< by_order_revision >, + composite_key< iterator_wrapper, + const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, + member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > >, + composite_key_compare< iterator_compare_less, std::greater< uint64_t > > >, + ordered_unique< + tag< by_reverse_order_revision >, + composite_key< iterator_wrapper, + const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, + member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > >, + composite_key_compare< iterator_compare_greater, std::greater< uint64_t > > >, + ordered_unique< tag< by_revision >, member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > > > >; + + iter_revision_index_type _itr_revision_index; + std::deque< state_delta_ptr > _delta_deque; + +public: + template< typename Initializer > + merge_iterator( state_delta_ptr head, Initializer&& init ) + { + KOINOS_ASSERT( head, internal_error, "cannot create a merge iterator on a null delta" ); + auto current_delta = head; + + 
do + { + _delta_deque.push_front( current_delta ); + + _itr_revision_index.emplace( iterator_wrapper( std::move( init( current_delta->backend() ) ), + current_delta->revision(), + current_delta->backend() ) ); + + current_delta = current_delta->parent(); + } + while( current_delta ); + + resolve_conflicts(); + } + + merge_iterator( const merge_iterator& other ); + + bool operator==( const merge_iterator& other ) const; + + merge_iterator& operator++(); + merge_iterator& operator--(); + + const value_type& operator*() const; + + const key_type& key() const; + +private: + template< typename ItrType > + bool is_dirty( ItrType itr ) + { + bool dirty = false; + + for( auto i = _delta_deque.size() - 1; itr->revision < _delta_deque[ i ]->revision() && !dirty; --i ) + { + dirty = _delta_deque[ i ]->is_modified( itr->itr.key() ); + } + + return dirty; + } + + void resolve_conflicts(); + bool is_end() const; +}; + +class merge_state +{ +public: + using key_type = state_delta::key_type; + using value_type = state_delta::value_type; + + merge_state( std::shared_ptr< state_delta > head ); + + merge_iterator begin() const; + merge_iterator end() const; + + const value_type* find( const key_type& key ) const; + merge_iterator lower_bound( const key_type& key ) const; + +private: + std::shared_ptr< state_delta > _head; +}; + +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/state_db.cpp b/src/koinos/state_db/state_db.cpp new file mode 100644 index 0000000..cb0e545 --- /dev/null +++ b/src/koinos/state_db/state_db.cpp @@ -0,0 +1,1505 @@ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace std { +template<> +struct hash< koinos::crypto::multihash > +{ + std::size_t operator()( const koinos::crypto::multihash& mh ) const + { + static const std::hash< std::string > hash_fn; + return hash_fn( koinos::util::converter::as< std::string >( mh ) ); + } +}; + +} // 
namespace std + +namespace koinos::chain { +bool operator==( const object_space& lhs, const object_space& rhs ) +{ + return lhs.system() == rhs.system() && lhs.zone() == rhs.zone() && lhs.id() == rhs.id(); +} + +bool operator<( const object_space& lhs, const object_space& rhs ) +{ + if( lhs.system() < rhs.system() ) + { + return true; + } + else if( lhs.system() > rhs.system() ) + { + return false; + } + + if( lhs.zone() < rhs.zone() ) + { + return true; + } + else if( lhs.system() > rhs.system() ) + { + return false; + } + + return lhs.id() < rhs.id(); +} +} // namespace koinos::chain + +namespace koinos::state_db { + +namespace detail { + +struct by_id; +struct by_revision; +struct by_parent; + +using state_delta_ptr = std::shared_ptr< state_delta >; + +using state_multi_index_type = boost::multi_index_container< + state_delta_ptr, + boost::multi_index::indexed_by< + boost::multi_index::ordered_unique< + boost::multi_index::tag< by_id >, + boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::id > >, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag< by_parent >, + boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::parent_id > >, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag< by_revision >, + boost::multi_index::const_mem_fun< state_delta, uint64_t, &state_delta::revision > > > >; + +const object_key null_key = object_key(); + +/** + * Private implementation of state_node interface. + * + * Maintains a pointer to database_impl, + * only allows reads / writes if the node corresponds to the DB's current state. 
+ */ +class state_node_impl final +{ +public: + state_node_impl() {} + + ~state_node_impl() {} + + const object_value* get_object( const object_space& space, const object_key& key ) const; + std::pair< const object_value*, const object_key > get_next_object( const object_space& space, + const object_key& key ) const; + std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, + const object_key& key ) const; + int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); + int64_t remove_object( const object_space& space, const object_key& key ); + crypto::multihash merkle_root() const; + std::vector< protocol::state_delta_entry > get_delta_entries() const; + + state_delta_ptr _state; + shared_lock_ptr _lock; +}; + +/** + * Private implementation of database interface. + * + * This class relies heavily on using chainbase as a backing store. + * Only one state_node can be active at a time. This is enforced by + * _current_node which is a weak_ptr. 
New nodes will throw + * NodeNotExpired exception if a + */ +class database_impl final +{ +public: + database_impl() {} + + ~database_impl() + { + close_lockless(); + } + + shared_lock_ptr get_shared_lock() const; + unique_lock_ptr get_unique_lock() const; + bool verify_shared_lock( const shared_lock_ptr& lock ) const; + bool verify_unique_lock( const unique_lock_ptr& lock ) const; + + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ); + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ); + void open_lockless( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp ); + void close( const unique_lock_ptr& lock ); + void close_lockless(); + + void reset( const unique_lock_ptr& lock ); + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child, const shared_lock_ptr& lock ) const; + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child, const unique_lock_ptr& lock ) const; + state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; + state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; + state_node_ptr get_node_lockless( const state_node_id& node_id ) const; + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + 
state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + void finalize_node( const state_node_id& node, const shared_lock_ptr& lock ); + void finalize_node( const state_node_id& node, const unique_lock_ptr& lock ); + void discard_node( const state_node_id& node, + const std::unordered_set< state_node_id >& whitelist, + const shared_lock_ptr& lock ); + void discard_node( const state_node_id& node, + const std::unordered_set< state_node_id >& whitelist, + const unique_lock_ptr& lock ); + void discard_node_lockless( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist ); + void commit_node( const state_node_id& node, const unique_lock_ptr& lock ); + + state_node_ptr get_head( const shared_lock_ptr& lock ) const; + state_node_ptr get_head( const unique_lock_ptr& lock ) const; + state_node_ptr get_head_lockless() const; + std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; + state_node_ptr get_root( const shared_lock_ptr& lock ) const; + state_node_ptr get_root( const unique_lock_ptr& lock ) const; + state_node_ptr get_root_lockless() const; + + bool is_open() const; + + std::optional< std::filesystem::path > _path; + genesis_init_function _init_func = nullptr; + state_node_comparator_function _comp = nullptr; + + state_multi_index_type _index; + state_delta_ptr _head; + std::map< state_node_id, state_delta_ptr > _fork_heads; + state_delta_ptr _root; + + /* Regarding mutexes used for synchronizing state_db... + * + * There are three mutexes that can be locked. 
They are: + * - _index_mutex (locks access to _index) + * - _node_mutex (locks access to creating new state_node_ptrs) + * - state_delta::cv_mutex() (locks access to a state_delta cv) + * + * Shared locks on the _node_mutex must exist beyond the scope of calls to state_db, + * so _node_mutex must be locked first. + * + * Consequently, _index_mutex must be locked last. All functions in state_db MUST + * follow this convention or we risk deadlock. + */ + mutable std::timed_mutex _index_mutex; + mutable std::shared_mutex _node_mutex; + mutable std::shared_mutex _fork_heads_mutex; +}; + +shared_lock_ptr database_impl::get_shared_lock() const +{ + return std::make_shared< std::shared_lock< std::shared_mutex > >( _node_mutex ); +} + +unique_lock_ptr database_impl::get_unique_lock() const +{ + return std::make_shared< std::unique_lock< std::shared_mutex > >( _node_mutex ); +} + +bool database_impl::verify_shared_lock( const shared_lock_ptr& lock ) const +{ + if( !lock ) + return false; + + if( !lock->owns_lock() ) + return false; + + return lock->mutex() == &_node_mutex; +} + +bool database_impl::verify_unique_lock( const unique_lock_ptr& lock ) const +{ + if( !lock ) + return false; + + if( !lock->owns_lock() ) + return false; + + return lock->mutex() == &_node_mutex; +} + +void database_impl::reset( const unique_lock_ptr& lock ) +{ + // + // This method closes, wipes and re-opens the database. + // + // So the caller needs to be very careful to only call this method if deleting the database is desirable! + // + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + // Wipe and start over from empty database! 
+ _root->clear(); + close_lockless(); + open_lockless( _path, _init_func, _comp ); +} + +void database_impl::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + + state_node_comparator_function comp; + + switch( algo ) + { + case fork_resolution_algorithm::block_time: + comp = &block_time_comparator; + break; + case fork_resolution_algorithm::pob: + comp = &pob_comparator; + break; + case fork_resolution_algorithm::fifo: + [[fallthrough]]; + default: + comp = &fifo_comparator; + } + + open( p, init, comp, lock ); +} + +void database_impl::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + open_lockless( p, init, comp ); +} + +void database_impl::open_lockless( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp ) +{ + auto root = std::make_shared< state_node >(); + root->_impl->_state = std::make_shared< state_delta >( p ); + _init_func = init; + _comp = comp; + + if( !root->revision() && root->_impl->_state->is_empty() && _init_func ) + { + init( root ); + } + root->_impl->_state->finalize(); + _index.insert( root->_impl->_state ); + _root = root->_impl->_state; + _head = root->_impl->_state; + _fork_heads.insert_or_assign( _head->id(), _head ); + + _path = p; +} + +void database_impl::close( const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< 
std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + close_lockless(); +} + +void database_impl::close_lockless() +{ + _fork_heads.clear(); + _root.reset(); + _head.reset(); + _index.clear(); +} + +state_node_ptr database_impl::get_node_at_revision( uint64_t revision, + const state_node_id& child_id, + const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( revision >= _root->revision(), + illegal_argument, + "cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", + ( "root", _root->revision() )( "req", revision ) ); + + if( revision == _root->revision() ) + { + auto root = get_root_lockless(); + if( root ) + root->_impl->_lock = lock; + + return root; + } + + auto child = get_node_lockless( child_id ); + if( !child ) + child = get_head_lockless(); + + state_delta_ptr delta = child->_impl->_state; + + while( delta->revision() > revision ) + { + delta = delta->parent(); + } + + auto node_itr = _index.find( delta->id() ); + + KOINOS_ASSERT( node_itr != _index.end(), + internal_error, + "could not find state node associated with linked state_delta ${id}", + ( "id", delta->id() ) ); + + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + node->_impl->_lock = lock; + return node; +} + +state_node_ptr database_impl::get_node_at_revision( uint64_t revision, + const state_node_id& child_id, + const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( revision >= 
_root->revision(), + illegal_argument, + "cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", + ( "root", _root->revision() )( "req", revision ) ); + + if( revision == _root->revision() ) + { + auto root = get_root_lockless(); + + return root; + } + + auto child = get_node_lockless( child_id ); + if( !child ) + child = get_head_lockless(); + + state_delta_ptr delta = child->_impl->_state; + + while( delta->revision() > revision ) + { + delta = delta->parent(); + } + + auto node_itr = _index.find( delta->id() ); + + KOINOS_ASSERT( node_itr != _index.end(), + internal_error, + "could not find state node associated with linked state_delta ${id}", + ( "id", delta->id() ) ); + + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + return node; +} + +state_node_ptr database_impl::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto node = get_node_lockless( node_id ); + if( node ) + node->_impl->_lock = lock; + + return node; +} + +state_node_ptr database_impl::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto node = get_node_lockless( node_id ); + + return node; +} + +state_node_ptr database_impl::get_node_lockless( const state_node_id& node_id ) const +{ + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + + auto node_itr = _index.find( node_id ); + + if( node_itr != _index.end() ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + return node; + } + + return state_node_ptr(); +} + +state_node_ptr database_impl::create_writable_node( const 
state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + ; + + // Needs to be configurable + auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); + + state_node_ptr parent_state = get_node( parent_id, lock ); + + if( parent_state ) + { + std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); + + // We need to own the lock + if( cv_lock.owns_lock() ) + { + // Check if the node is finalized + bool is_finalized = parent_state->is_finalized(); + + // If the node is finalized, try to wait for the node to be finalized + if( !is_finalized + && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) + is_finalized = parent_state->is_finalized(); + + // Finally, if the node is finalized, we can create a new writable node with the desired parent + if( is_finalized ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); + + std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); + + // Ensure the parent node still exists in the index and then insert the child node + if( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() + && _index.insert( node->_impl->_state ).second ) + { + node->_impl->_lock = lock; + return node; + } + } + } + } + + return state_node_ptr(); +} + +state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + ; + + // Needs to be configurable + auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); + + state_node_ptr 
parent_state = get_node( parent_id, lock ); + + if( parent_state ) + { + std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); + + // We need to own the lock + if( cv_lock.owns_lock() ) + { + // Check if the node is finalized + bool is_finalized = parent_state->is_finalized(); + + // If the node is finalized, try to wait for the node to be finalized + if( !is_finalized + && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) + is_finalized = parent_state->is_finalized(); + + // Finally, if the node is finalized, we can create a new writable node with the desired parent + if( is_finalized ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); + + std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); + + // Ensure the parent node still exists in the index and then insert the child node + if( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() + && _index.insert( node->_impl->_state ).second ) + { + return node; + } + } + } + } + + return state_node_ptr(); +} + +state_node_ptr database_impl::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); + + auto new_node = std::make_shared< state_node >(); + new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); + + if( _index.insert( new_node->_impl->_state 
).second ) + { + new_node->_impl->_lock = lock; + return new_node; + } + + return state_node_ptr(); +} + +state_node_ptr database_impl::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); + + auto new_node = std::make_shared< state_node >(); + new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); + + if( _index.insert( new_node->_impl->_state ).second ) + { + return new_node; + } + + return state_node_ptr(); +} + +void database_impl::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + + { + std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); + + node->_impl->_state->finalize(); + } + + node->_impl->_state->cv().notify_all(); + + if( node->revision() > _head->revision() ) + { + _head = node->_impl->_state; + } + else if( node->revision() == _head->revision() ) + { + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + fork_list forks; + forks.reserve( _fork_heads.size() ); + std::transform( std::begin( _fork_heads ), + std::end( _fork_heads ), + 
std::back_inserter( forks ), + []( const auto& entry ) + { + state_node_ptr s = std::make_shared< state_node >(); + s->_impl->_state = entry.second; + return s; + } ); + + auto head = get_head_lockless(); + if( auto new_head = _comp( forks, head, node ); new_head != nullptr ) + { + _head = new_head->_impl->_state; + } + else + { + _head = head->parent()->_impl->_state; + auto head_itr = _fork_heads.find( head->id() ); + if( head_itr != std::end( _fork_heads ) ) + _fork_heads.erase( head_itr ); + _fork_heads.insert_or_assign( head->parent()->id(), _head ); + } + } + + // When node is finalized, parent node needs to be removed from heads, if it exists. + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + if( node->parent_id() != _head->id() ) + { + auto parent_itr = _fork_heads.find( node->parent_id() ); + if( parent_itr != std::end( _fork_heads ) ) + _fork_heads.erase( parent_itr ); + + _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); + } +} + +void database_impl::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + + { + std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); + + node->_impl->_state->finalize(); + } + + node->_impl->_state->cv().notify_all(); + + if( node->revision() > _head->revision() ) + { + _head = node->_impl->_state; + } + else if( node->revision() == _head->revision() ) + { + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + fork_list forks; + forks.reserve( _fork_heads.size() ); + std::transform( std::begin( _fork_heads ), + std::end( _fork_heads ), + 
std::back_inserter( forks ), + []( const auto& entry ) + { + state_node_ptr s = std::make_shared< state_node >(); + s->_impl->_state = entry.second; + return s; + } ); + + auto head = get_head_lockless(); + if( auto new_head = _comp( forks, head, node ); new_head != nullptr ) + { + _head = new_head->_impl->_state; + } + else + { + _head = head->parent()->_impl->_state; + auto head_itr = _fork_heads.find( head->id() ); + if( head_itr != std::end( _fork_heads ) ) + _fork_heads.erase( head_itr ); + _fork_heads.insert_or_assign( head->parent()->id(), _head ); + } + } + + // When node is finalized, parent node needs to be removed from heads, if it exists. + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + if( node->parent_id() != _head->id() ) + { + auto parent_itr = _fork_heads.find( node->parent_id() ); + if( parent_itr != std::end( _fork_heads ) ) + _fork_heads.erase( parent_itr ); + + _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); + } +} + +void database_impl::discard_node( const state_node_id& node_id, + const std::unordered_set< state_node_id >& whitelist, + const shared_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + discard_node_lockless( node_id, whitelist ); +} + +void database_impl::discard_node( const state_node_id& node_id, + const std::unordered_set< state_node_id >& whitelist, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + discard_node_lockless( node_id, whitelist ); +} + +void database_impl::discard_node_lockless( const state_node_id& node_id, + const 
std::unordered_set< state_node_id >& whitelist ) +{ + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + + if( !node ) + return; + + KOINOS_ASSERT( node_id != _root->id(), illegal_argument, "cannot discard root node" ); + + std::vector< state_node_id > remove_queue{ node_id }; + const auto& previdx = _index.template get< by_parent >(); + const auto head_id = _head->id(); + + for( uint32_t i = 0; i < remove_queue.size(); ++i ) + { + KOINOS_ASSERT( remove_queue[ i ] != head_id, + cannot_discard, + "cannot discard a node that would result in discarding of head" ); + + auto previtr = previdx.lower_bound( remove_queue[ i ] ); + while( previtr != previdx.end() && ( *previtr )->parent_id() == remove_queue[ i ] ) + { + // Do not remove nodes on the whitelist + if( whitelist.find( ( *previtr )->id() ) == whitelist.end() ) + { + remove_queue.push_back( ( *previtr )->id() ); + } + + ++previtr; + } + + // We may discard one or more fork heads when discarding a minority fork tree + // For completeness, we'll check every node to see if it is a fork head + auto head_itr = _fork_heads.find( remove_queue[ i ] ); + if( head_itr != _fork_heads.end() ) + { + _fork_heads.erase( head_itr ); + } + } + + for( const auto& id: remove_queue ) + { + auto itr = _index.find( id ); + if( itr != _index.end() ) + _index.erase( itr ); + } + + // When node is discarded, if the parent node is not a parent of other nodes (no forks), add it to heads. 
+ auto fork_itr = previdx.find( node->parent_id() ); + if( fork_itr == previdx.end() ) + { + auto parent_itr = _index.find( node->parent_id() ); + KOINOS_ASSERT( parent_itr != _index.end(), internal_error, "discarded parent node not found in node index" ); + _fork_heads.insert_or_assign( ( *parent_itr )->id(), *parent_itr ); + } +} + +void database_impl::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + ; + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + + // If the node_id to commit is the root id, return. It is already committed. + if( node_id == _root->id() ) + return; + + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found", ( "n", node_id ) ); + + auto old_root = _root; + _root = node->_impl->_state; + + _index.modify( _index.find( node_id ), + []( state_delta_ptr& n ) + { + n->commit(); + } ); + + std::unordered_set< state_node_id > whitelist{ node_id }; + discard_node_lockless( old_root->id(), whitelist ); +} + +state_node_ptr database_impl::get_head( const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto head = get_head_lockless(); + if( head ) + head->_impl->_lock = lock; + + return head; +} + +state_node_ptr database_impl::get_head( const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto head = get_head_lockless(); + + return head; +} + +state_node_ptr database_impl::get_head_lockless() const +{ + 
KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto head = std::make_shared< state_node >(); + head->_impl->_state = _head; + return head; +} + +std::vector< state_node_ptr > database_impl::get_fork_heads( const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > fork_heads; + fork_heads.reserve( _fork_heads.size() ); + + for( auto& head: _fork_heads ) + { + auto fork_head = std::make_shared< state_node >(); + fork_head->_impl->_state = head.second; + fork_head->_impl->_lock = lock; + fork_heads.push_back( fork_head ); + } + + return fork_heads; +} + +std::vector< state_node_ptr > database_impl::get_fork_heads( const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > fork_heads; + fork_heads.reserve( _fork_heads.size() ); + + for( auto& head: _fork_heads ) + { + auto fork_head = std::make_shared< state_node >(); + fork_head->_impl->_state = head.second; + fork_heads.push_back( fork_head ); + } + + return fork_heads; +} + +std::vector< state_node_ptr > database_impl::get_all_nodes( const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > nodes; 
+ nodes.reserve( _index.size() ); + + for( const auto& delta: _index ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = delta; + node->_impl->_lock = lock; + nodes.push_back( node ); + } + + return nodes; +} + +std::vector< state_node_ptr > database_impl::get_all_nodes( const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > nodes; + nodes.reserve( _index.size() ); + + for( const auto& delta: _index ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = delta; + nodes.push_back( node ); + } + + return nodes; +} + +state_node_ptr database_impl::get_root( const shared_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto root = get_root_lockless(); + if( root ) + root->_impl->_lock = lock; + + return root; +} + +state_node_ptr database_impl::get_root( const unique_lock_ptr& lock ) const +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + + auto root = get_root_lockless(); + + return root; +} + +state_node_ptr database_impl::get_root_lockless() const +{ + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto root = std::make_shared< state_node >(); + root->_impl->_state = _root; + return root; +} + +bool database_impl::is_open() const +{ + return (bool)_root && (bool)_head; +} + +const object_value* state_node_impl::get_object( const object_space& space, const object_key& key ) const +{ + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto 
key_string = util::converter::as< std::string >( db_key ); + + auto pobj = merge_state( _state ).find( key_string ); + + if( pobj != nullptr ) + { + return pobj; + } + + return nullptr; +} + +std::pair< const object_value*, const object_key > state_node_impl::get_next_object( const object_space& space, + const object_key& key ) const +{ + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); + + auto state = merge_state( _state ); + auto it = state.lower_bound( key_string ); + + if( it != state.end() && it.key() == key_string ) + { + it++; + } + + if( it != state.end() ) + { + chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); + + if( next_key.space() == space ) + { + return { &*it, next_key.key() }; + } + } + + return { nullptr, null_key }; +} + +std::pair< const object_value*, const object_key > state_node_impl::get_prev_object( const object_space& space, + const object_key& key ) const +{ + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); + + auto state = merge_state( _state ); + auto it = state.lower_bound( key_string ); + + if( it != state.begin() ) + { + --it; + chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); + + if( next_key.space() == space ) + { + return { &*it, next_key.key() }; + } + } + + return { nullptr, null_key }; +} + +int64_t state_node_impl::put_object( const object_space& space, const object_key& key, const object_value* val ) +{ + KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); + + int64_t bytes_used = 0; + auto pobj = merge_state( _state ).find( key_string ); + + if( 
pobj != nullptr ) + bytes_used -= pobj->size(); + else + bytes_used += key_string.size(); + + bytes_used += val->size(); + _state->put( key_string, *val ); + + return bytes_used; +} + +int64_t state_node_impl::remove_object( const object_space& space, const object_key& key ) +{ + KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); + + int64_t bytes_used = 0; + auto pobj = merge_state( _state ).find( key_string ); + + if( pobj != nullptr ) + { + bytes_used -= pobj->size(); + bytes_used -= key_string.size(); + } + + _state->erase( key_string ); + + return bytes_used; +} + +crypto::multihash state_node_impl::merkle_root() const +{ + return _state->merkle_root(); +} + +std::vector< protocol::state_delta_entry > state_node_impl::get_delta_entries() const +{ + return _state->get_delta_entries(); +} + +} // namespace detail + +abstract_state_node::abstract_state_node(): + _impl( new detail::state_node_impl() ) +{} + +abstract_state_node::~abstract_state_node() {} + +const object_value* abstract_state_node::get_object( const object_space& space, const object_key& key ) const +{ + return _impl->get_object( space, key ); +} + +std::pair< const object_value*, const object_key > abstract_state_node::get_next_object( const object_space& space, + const object_key& key ) const +{ + return _impl->get_next_object( space, key ); +} + +std::pair< const object_value*, const object_key > abstract_state_node::get_prev_object( const object_space& space, + const object_key& key ) const +{ + return _impl->get_prev_object( space, key ); +} + +int64_t abstract_state_node::put_object( const object_space& space, const object_key& key, const object_value* val ) +{ + return _impl->put_object( space, key, val ); +} + +int64_t abstract_state_node::remove_object( const object_space& space, const 
object_key& key ) +{ + return _impl->remove_object( space, key ); +} + +bool abstract_state_node::is_finalized() const +{ + return _impl->_state->is_finalized(); +} + +crypto::multihash abstract_state_node::merkle_root() const +{ + KOINOS_ASSERT( is_finalized(), koinos::exception, "node must be finalized to calculate merkle root" ); + return _impl->merkle_root(); +} + +std::vector< protocol::state_delta_entry > abstract_state_node::get_delta_entries() const +{ + return _impl->get_delta_entries(); +} + +anonymous_state_node_ptr abstract_state_node::create_anonymous_node() +{ + auto anonymous_node = std::make_shared< anonymous_state_node >(); + anonymous_node->_parent = shared_from_derived(); + anonymous_node->_impl->_state = _impl->_state->make_child(); + anonymous_node->_impl->_lock = _impl->_lock; + return anonymous_node; +} + +state_node::state_node(): + abstract_state_node() +{} + +state_node::~state_node() {} + +const state_node_id& state_node::id() const +{ + return _impl->_state->id(); +} + +const state_node_id& state_node::parent_id() const +{ + return _impl->_state->parent_id(); +} + +uint64_t state_node::revision() const +{ + return _impl->_state->revision(); +} + +abstract_state_node_ptr state_node::parent() const +{ + auto parent_delta = _impl->_state->parent(); + if( parent_delta ) + { + auto parent_node = std::make_shared< state_node >(); + parent_node->_impl->_state = parent_delta; + parent_node->_impl->_lock = _impl->_lock; + return parent_node; + } + + return abstract_state_node_ptr(); +} + +const protocol::block_header& state_node::block_header() const +{ + return _impl->_state->block_header(); +} + +abstract_state_node_ptr state_node::shared_from_derived() +{ + return shared_from_this(); +} + +anonymous_state_node::anonymous_state_node(): + abstract_state_node() +{} + +anonymous_state_node::anonymous_state_node::~anonymous_state_node() {} + +const state_node_id& anonymous_state_node::id() const +{ + return _parent->id(); +} + +const state_node_id& 
anonymous_state_node::parent_id() const +{ + return _parent->parent_id(); +} + +uint64_t anonymous_state_node::revision() const +{ + return _parent->revision(); +} + +abstract_state_node_ptr anonymous_state_node::parent() const +{ + return _parent; +} + +const protocol::block_header& anonymous_state_node::block_header() const +{ + return _parent->block_header(); +} + +void anonymous_state_node::commit() +{ + KOINOS_ASSERT( !_parent->is_finalized(), node_finalized, "cannot commit to a finalized node" ); + _impl->_state->squash(); + reset(); +} + +void anonymous_state_node::reset() +{ + _impl->_state = _impl->_state->make_child(); +} + +abstract_state_node_ptr anonymous_state_node::shared_from_derived() +{ + return shared_from_this(); +} + +state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ) +{ + return current_head; +} + +state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) +{ + return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? new_block : head_block; +} + +state_node_ptr pob_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) +{ + if( head_block->block_header().signer() != new_block->block_header().signer() ) + return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? 
new_block : head_block; + + auto it = std::find_if( std::begin( forks ), + std::end( forks ), + [ & ]( state_node_ptr p ) + { + return p->id() == head_block->id(); + } ); + if( it != std::end( forks ) ) + forks.erase( it ); + + struct + { + bool operator()( abstract_state_node_ptr a, abstract_state_node_ptr b ) const + { + if( a->revision() > b->revision() ) + return true; + else if( a->revision() < b->revision() ) + return false; + + if( a->block_header().timestamp() < b->block_header().timestamp() ) + return true; + else if( a->block_header().timestamp() > b->block_header().timestamp() ) + return false; + + if( a->id() < b->id() ) + return true; + + return false; + } + } priority_algorithm; + + if( std::size( forks ) ) + { + std::sort( std::begin( forks ), std::end( forks ), priority_algorithm ); + it = std::begin( forks ); + return priority_algorithm( head_block->parent(), *it ) ? state_node_ptr() : *it; + } + + return state_node_ptr(); +} + +database::database(): + impl( new detail::database_impl() ) +{} + +database::~database() {} + +shared_lock_ptr database::get_shared_lock() const +{ + return impl->get_shared_lock(); +} + +unique_lock_ptr database::get_unique_lock() const +{ + return impl->get_unique_lock(); +} + +void database::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ) +{ + impl->open( p, init, algo, lock ? lock : get_unique_lock() ); +} + +void database::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ) +{ + impl->open( p, init, comp, lock ? lock : get_unique_lock() ); +} + +void database::close( const unique_lock_ptr& lock ) +{ + impl->close( lock ? lock : get_unique_lock() ); +} + +void database::reset( const unique_lock_ptr& lock ) +{ + impl->reset( lock ? 
lock : get_unique_lock() ); +} + +state_node_ptr +database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const +{ + return impl->get_node_at_revision( revision, child_id, lock ); +} + +state_node_ptr database::get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const +{ + static const state_node_id null_id; + return impl->get_node_at_revision( revision, null_id, lock ); +} + +state_node_ptr +database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const +{ + return impl->get_node_at_revision( revision, child_id, lock ); +} + +state_node_ptr database::get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const +{ + static const state_node_id null_id; + return impl->get_node_at_revision( revision, null_id, lock ); +} + +state_node_ptr database::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const +{ + return impl->get_node( node_id, lock ); +} + +state_node_ptr database::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const +{ + return impl->get_node( node_id, lock ); +} + +state_node_ptr database::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) +{ + return impl->create_writable_node( parent_id, new_id, header, lock ); +} + +state_node_ptr database::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) +{ + return impl->create_writable_node( parent_id, new_id, header, lock ); +} + +state_node_ptr database::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) +{ + return impl->clone_node( node_id, new_id, header, lock ); +} + +state_node_ptr database::clone_node( const 
state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) +{ + return impl->clone_node( node_id, new_id, header, lock ); +} + +void database::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) +{ + impl->finalize_node( node_id, lock ); +} + +void database::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) +{ + impl->finalize_node( node_id, lock ); +} + +void database::discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ) +{ + static const std::unordered_set< state_node_id > whitelist; + impl->discard_node( node_id, whitelist, lock ); +} + +void database::discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ) +{ + static const std::unordered_set< state_node_id > whitelist; + impl->discard_node( node_id, whitelist, lock ); +} + +void database::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) +{ + impl->commit_node( node_id, lock ? 
lock : get_unique_lock() ); +} + +state_node_ptr database::get_head( const shared_lock_ptr& lock ) const +{ + return impl->get_head( lock ); +} + +state_node_ptr database::get_head( const unique_lock_ptr& lock ) const +{ + return impl->get_head( lock ); +} + +std::vector< state_node_ptr > database::get_fork_heads( const shared_lock_ptr& lock ) const +{ + return impl->get_fork_heads( lock ); +} + +std::vector< state_node_ptr > database::get_fork_heads( const unique_lock_ptr& lock ) const +{ + return impl->get_fork_heads( lock ); +} + +std::vector< state_node_ptr > database::get_all_nodes( const shared_lock_ptr& lock ) const +{ + return impl->get_all_nodes( lock ); +} + +std::vector< state_node_ptr > database::get_all_nodes( const unique_lock_ptr& lock ) const +{ + return impl->get_all_nodes( lock ); +} + +state_node_ptr database::get_root( const shared_lock_ptr& lock ) const +{ + return impl->get_root( lock ); +} + +state_node_ptr database::get_root( const unique_lock_ptr& lock ) const +{ + return impl->get_root( lock ); +} + +} // namespace koinos::state_db diff --git a/src/koinos/state_db/state_delta.cpp b/src/koinos/state_db/state_delta.cpp new file mode 100644 index 0000000..32571e6 --- /dev/null +++ b/src/koinos/state_db/state_delta.cpp @@ -0,0 +1,373 @@ +#include + +#include + +namespace koinos::state_db::detail { + +using backend_type = state_delta::backend_type; +using value_type = state_delta::value_type; + +state_delta::state_delta( const std::optional< std::filesystem::path >& p ) +{ + if( p ) + { + auto backend = std::make_shared< backends::rocksdb::rocksdb_backend >(); + backend->open( *p ); + _backend = backend; + } + else + { + _backend = std::make_shared< backends::map::map_backend >(); + } + + _revision = _backend->revision(); + _id = _backend->id(); + _merkle_root = _backend->merkle_root(); +} + +void state_delta::put( const key_type& k, const value_type& v ) +{ + _backend->put( k, v ); +} + +void state_delta::erase( const key_type& k ) +{ + if( 
find( k ) ) + { + _backend->erase( k ); + _removed_objects.insert( k ); + } +} + +const value_type* state_delta::find( const key_type& key ) const +{ + if( auto val_ptr = _backend->get( key ); val_ptr ) + return val_ptr; + + if( is_removed( key ) ) + return nullptr; + + return is_root() ? nullptr : _parent->find( key ); +} + +void state_delta::squash() +{ + if( is_root() ) + return; + + // If an object is removed here and exists in the parent, it needs to only be removed in the parent + // If an object is modified here, but removed in the parent, it needs to only be modified in the parent + // These are O(m log n) operations. Because of this, squash should only be called from anonymous state + // nodes, whose modifications are much smaller + for( const key_type& r_key: _removed_objects ) + { + _parent->_backend->erase( r_key ); + + if( !_parent->is_root() ) + { + _parent->_removed_objects.insert( r_key ); + } + } + + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + _parent->_backend->put( itr.key(), *itr ); + + if( !_parent->is_root() ) + { + _parent->_removed_objects.erase( itr.key() ); + } + } +} + +void state_delta::commit() +{ + /** + * commit works in two distinct phases. The first is head recursion until we are at the root + * delta. At the root, we grab the backend and begin a write batch that will encompass all + * state writes and the final write of the metadata. + * + * The second phase is popping off the stack, writing state to the backend. After all deltas + * have been written to the backend, we write metadata to the backend and end the write batch. + * + * The result is this delta becomes the new root delta and state is written to the root backend + * atomically.
+ */ + KOINOS_ASSERT( !is_root(), internal_error, "cannot commit root" ); + + std::vector< std::shared_ptr< state_delta > > node_stack; + auto current_node = shared_from_this(); + + while( current_node ) + { + node_stack.push_back( current_node ); + current_node = current_node->_parent; + } + + // Because we already asserted we were not root, there will always exist a minimum of two nodes in the stack, + // this and root. + auto backend = node_stack.back()->_backend; + node_stack.back()->_backend.reset(); + node_stack.pop_back(); + + // Start the write batch + backend->start_write_batch(); + + // While there are nodes on the stack, write them to the backend + while( node_stack.size() ) + { + auto& node = node_stack.back(); + + for( const key_type& r_key: node->_removed_objects ) + { + backend->erase( r_key ); + } + + for( auto itr = node->_backend->begin(); itr != node->_backend->end(); ++itr ) + { + backend->put( itr.key(), *itr ); + } + + node_stack.pop_back(); + } + + // Update metadata on the backend + backend->set_block_header( block_header() ); + backend->set_revision( _revision ); + backend->set_id( _id ); + backend->set_merkle_root( merkle_root() ); + backend->store_metadata(); + + // End the write batch making the entire merge atomic + backend->end_write_batch(); + + // Reset local variables to match new status as root delta + _removed_objects.clear(); + _backend = backend; + _parent.reset(); +} + +void state_delta::clear() +{ + _backend->clear(); + _removed_objects.clear(); + + _revision = 0; + _id = crypto::multihash::zero( crypto::multicodec::sha2_256 ); +} + +bool state_delta::is_modified( const key_type& k ) const +{ + return _backend->get( k ) || _removed_objects.find( k ) != _removed_objects.end(); +} + +bool state_delta::is_removed( const key_type& k ) const +{ + return _removed_objects.find( k ) != _removed_objects.end(); +} + +bool state_delta::is_root() const +{ + return !_parent; +} + +uint64_t state_delta::revision() const +{ + return 
_revision; +} + +void state_delta::set_revision( uint64_t revision ) +{ + _revision = revision; + if( is_root() ) + { + _backend->set_revision( revision ); + } +} + +bool state_delta::is_finalized() const +{ + return _finalized; +} + +void state_delta::finalize() +{ + _finalized = true; +} + +std::condition_variable_any& state_delta::cv() +{ + return _cv; +} + +std::timed_mutex& state_delta::cv_mutex() +{ + return _cv_mutex; +} + +crypto::multihash state_delta::merkle_root() const +{ + if( !_merkle_root ) + { + std::vector< std::string > object_keys; + object_keys.reserve( _backend->size() + _removed_objects.size() ); + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + object_keys.push_back( itr.key() ); + } + + for( const auto& removed: _removed_objects ) + { + object_keys.push_back( removed ); + } + + std::sort( object_keys.begin(), object_keys.end() ); + + std::vector< crypto::multihash > merkle_leafs; + merkle_leafs.reserve( object_keys.size() * 2 ); + + for( const auto& key: object_keys ) + { + merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, key ) ); + auto val_ptr = _backend->get( key ); + merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, val_ptr ? 
*val_ptr : std::string() ) ); + } + + _merkle_root = crypto::merkle_tree( crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + } + + return *_merkle_root; +} + +const protocol::block_header& state_delta::block_header() const +{ + return _backend->block_header(); +} + +std::shared_ptr< state_delta > state_delta::make_child( const state_node_id& id, const protocol::block_header& header ) +{ + auto child = std::make_shared< state_delta >(); + child->_parent = shared_from_this(); + child->_id = id; + child->_revision = _revision + 1; + child->_backend = std::make_shared< backends::map::map_backend >(); + child->_backend->set_block_header( header ); + + return child; +} + +std::shared_ptr< state_delta > state_delta::clone( const state_node_id& id, const protocol::block_header& header ) +{ + auto new_node = std::make_shared< state_delta >(); + new_node->_parent = _parent; + new_node->_backend = _backend->clone(); + new_node->_removed_objects = _removed_objects; + + new_node->_id = id; + new_node->_revision = _revision; + new_node->_merkle_root = _merkle_root; + + new_node->_finalized = _finalized; + + new_node->_backend->set_id( id ); + new_node->_backend->set_revision( _revision ); + new_node->_backend->set_block_header( header ); + + if( _merkle_root ) + { + new_node->_backend->set_merkle_root( *_merkle_root ); + } + + return new_node; +} + +const std::shared_ptr< backend_type > state_delta::backend() const +{ + return _backend; +} + +const state_node_id& state_delta::id() const +{ + return _id; +} + +const state_node_id& state_delta::parent_id() const +{ + static const state_node_id null_id; + return _parent ? 
_parent->_id : null_id; +} + +std::shared_ptr< state_delta > state_delta::parent() const +{ + return _parent; +} + +bool state_delta::is_empty() const +{ + if( _backend->size() ) + return false; + else if( _parent ) + return _parent->is_empty(); + + return true; +} + +std::shared_ptr< state_delta > state_delta::get_root() +{ + if( !is_root() ) + { + if( _parent->is_root() ) + return _parent; + else + return _parent->get_root(); + } + + return std::shared_ptr< state_delta >(); +} + +std::vector< protocol::state_delta_entry > state_delta::get_delta_entries() const +{ + std::vector< std::string > object_keys; + object_keys.reserve( _backend->size() + _removed_objects.size() ); + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + object_keys.push_back( itr.key() ); + } + + for( const auto& removed: _removed_objects ) + { + object_keys.push_back( removed ); + } + + std::sort( object_keys.begin(), object_keys.end() ); + + std::vector< protocol::state_delta_entry > deltas; + deltas.reserve( object_keys.size() ); + + for( const auto& key: object_keys ) + { + protocol::state_delta_entry entry; + + // Deserialize the key into a database_key object + koinos::chain::database_key db_key; + if( db_key.ParseFromString( key ) ) + { + entry.mutable_object_space()->set_system( db_key.space().system() ); + entry.mutable_object_space()->set_zone( db_key.space().zone() ); + entry.mutable_object_space()->set_id( db_key.space().id() ); + + entry.set_key( db_key.key() ); + auto value = _backend->get( key ); + + // Set the optional field if not null + if( value != nullptr ) + entry.set_value( *value ); + + deltas.push_back( entry ); + } + } + + return deltas; +} + +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/state_delta.hpp b/src/koinos/state_db/state_delta.hpp new file mode 100644 index 0000000..82f1381 --- /dev/null +++ b/src/koinos/state_db/state_delta.hpp @@ -0,0 +1,89 @@ +#pragma once +#include +#include +#include +#include + 
+#include +#include + +#include +#include +#include +#include +#include +#include + +namespace koinos::state_db::detail { + +class state_delta: public std::enable_shared_from_this< state_delta > +{ +public: + using backend_type = backends::abstract_backend; + using key_type = backend_type::key_type; + using value_type = backend_type::value_type; + +private: + std::shared_ptr< state_delta > _parent; + + std::shared_ptr< backend_type > _backend; + std::unordered_set< key_type > _removed_objects; + + state_node_id _id; + uint64_t _revision = 0; + mutable std::optional< crypto::multihash > _merkle_root; + + bool _finalized = false; + + std::timed_mutex _cv_mutex; + std::condition_variable_any _cv; + +public: + state_delta() = default; + state_delta( const std::optional< std::filesystem::path >& p ); + ~state_delta() = default; + + void put( const key_type& k, const value_type& v ); + void erase( const key_type& k ); + const value_type* find( const key_type& key ) const; + + void squash(); + void commit(); + + void clear(); + + bool is_modified( const key_type& k ) const; + bool is_removed( const key_type& k ) const; + bool is_root() const; + bool is_empty() const; + + uint64_t revision() const; + void set_revision( uint64_t revision ); + + bool is_finalized() const; + void finalize(); + + std::condition_variable_any& cv(); + std::timed_mutex& cv_mutex(); + + crypto::multihash merkle_root() const; + std::vector< protocol::state_delta_entry > get_delta_entries() const; + + const state_node_id& id() const; + const state_node_id& parent_id() const; + std::shared_ptr< state_delta > parent() const; + const protocol::block_header& block_header() const; + + std::shared_ptr< state_delta > make_child( const state_node_id& id = state_node_id(), + const protocol::block_header& header = protocol::block_header() ); + std::shared_ptr< state_delta > clone( const state_node_id& id, const protocol::block_header& header ); + + const std::shared_ptr< backend_type > backend() const; + 
+private: + void commit_helper(); + + std::shared_ptr< state_delta > get_root(); +}; + +} // namespace koinos::state_db::detail diff --git a/tests/BoostTestTargetConfig.h b/tests/BoostTestTargetConfig.h deleted file mode 100644 index dd3cdda..0000000 --- a/tests/BoostTestTargetConfig.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF static library - -#include diff --git a/tests/BoostTestTargets.cmake b/tests/BoostTestTargets.cmake deleted file mode 100644 index 799c902..0000000 --- a/tests/BoostTestTargets.cmake +++ /dev/null @@ -1,242 +0,0 @@ -# - Add tests using boost::test -# -# Add this line to your test files in place of including a basic boost test header: -# #include -# -# If you cannot do that and must use the included form for a given test, -# include the line -# // OVERRIDE_BOOST_TEST_INCLUDED_WARNING -# in the same file with the boost test include. -# -# include(BoostTestTargets) -# add_boost_test( SOURCES [] -# [FAIL_REGULAR_EXPRESSION ] -# [LAUNCHER ] -# [LIBRARIES [...]] -# [RESOURCES [...]] -# [TESTS [...]]) -# -# If for some reason you need access to the executable target created, -# it can be found in ${${testdriver_name}_TARGET_NAME} as specified when -# you called add_boost_test -# -# Requires CMake 2.6 or newer (uses the 'function' command) -# -# Requires: -# GetForceIncludeDefinitions -# CopyResourcesToBuildTree -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__add_boost_test) - return() -endif() -set(__add_boost_test YES) - -set(BOOST_TEST_TARGET_PREFIX "boosttest") - -if(NOT Boost_FOUND) - find_package(Boost 1.34.0 QUIET) -endif() - -include(GetForceIncludeDefinitions.cmake) -include(CopyResourcesToBuildTree.cmake) - -if(Boost_FOUND) - set(_boosttesttargets_libs) - set(_boostConfig "BoostTestTargetsIncluded.h") - if(NOT Boost_UNIT_TEST_FRAMEWORK_LIBRARY) - find_package(Boost 1.34.0 QUIET COMPONENTS unit_test_framework) - endif() - if(Boost_UNIT_TEST_FRAMEWORK_LIBRARY) - set(_boosttesttargets_libs "${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}") - if(Boost_USE_STATIC_LIBS) - set(_boostConfig "BoostTestTargetsStatic.h") - else() - if(NOT APPLE) - set(_boostConfig "BoostTestTargetsDynamic.h") - endif() - endif() - endif() - get_filename_component(_moddir ${CMAKE_CURRENT_LIST_FILE} PATH) - configure_file("${_moddir}/${_boostConfig}" - "${CMAKE_CURRENT_BINARY_DIR}/BoostTestTargetConfig.h" - COPYONLY) - include_directories("${CMAKE_CURRENT_BINARY_DIR}") -endif() - -function(add_boost_test _name) - if(NOT BUILD_TESTING) - return() - endif() - - # parse arguments - set(_nowhere) - set(_curdest _nowhere) - set(_val_args - SOURCES - FAIL_REGULAR_EXPRESSION - LAUNCHER - LIBRARIES - RESOURCES - TESTS) - set(_bool_args - USE_COMPILED_LIBRARY) - foreach(_arg ${_val_args} ${_bool_args}) - set(${_arg}) - endforeach() - foreach(_element ${ARGN}) - list(FIND _val_args "${_element}" _val_arg_find) - list(FIND _bool_args "${_element}" _bool_arg_find) - if("${_val_arg_find}" GREATER "-1") - set(_curdest "${_element}") - elseif("${_bool_arg_find}" GREATER "-1") - set("${_element}" ON) - set(_curdest _nowhere) - else() - list(APPEND ${_curdest} "${_element}") - endif() - endforeach() - - if(_nowhere) - message(FATAL_ERROR "Syntax error in use of add_boost_test!") - endif() - - if(NOT SOURCES) - message(FATAL_ERROR - "Syntax error in use of 
add_boost_test: at least one source file required!") - endif() - - if(Boost_FOUND) - - include_directories(${Boost_INCLUDE_DIRS}) - - set(includeType) - foreach(src ${SOURCES}) - file(READ ${src} thefile) - if("${thefile}" MATCHES ".*BoostTestTargetConfig.h.*") - set(includeType CONFIGURED) - set(includeFileLoc ${src}) - break() - elseif("${thefile}" MATCHES ".*boost/test/included/unit_test.hpp.*") - set(includeType INCLUDED) - set(includeFileLoc ${src}) - set(_boosttesttargets_libs) # clear this out - linking would be a bad idea - if(NOT - "${thefile}" - MATCHES - ".*OVERRIDE_BOOST_TEST_INCLUDED_WARNING.*") - message("Please replace the include line in ${src} with this alternate include line instead:") - message(" \#include ") - message("Once you've saved your changes, re-run CMake. (See BoostTestTargets.cmake for more info)") - endif() - break() - endif() - endforeach() - - if(NOT _boostTestTargetsNagged${_name} STREQUAL "${includeType}") - if("${includeType}" STREQUAL "CONFIGURED") - message(STATUS - "Test '${_name}' uses the CMake-configurable form of the boost test framework - congrats! (Including File: ${includeFileLoc})") - elseif("${includeType}" STREQUAL "INCLUDED") - message("In test '${_name}': ${includeFileLoc} uses the 'included' form of the boost unit test framework.") - else() - message("In test '${_name}': Didn't detect the CMake-configurable boost test include.") - message("Please replace your existing boost test include in that test with the following:") - message(" \#include ") - message("Once you've saved your changes, re-run CMake. (See BoostTestTargets.cmake for more info)") - endif() - endif() - set(_boostTestTargetsNagged${_name} - "${includeType}" - CACHE - INTERNAL - "" - FORCE) - - - if(RESOURCES) - list(APPEND SOURCES ${RESOURCES}) - endif() - - # Generate a unique target name, using the relative binary dir - # and provided name. 
(transform all / into _ and remove all other - # non-alphabet characters) - file(RELATIVE_PATH - targetpath - "${CMAKE_BINARY_DIR}" - "${CMAKE_CURRENT_BINARY_DIR}") - string(REGEX REPLACE "[^A-Za-z/_]" "" targetpath "${targetpath}") - string(REPLACE "/" "_" targetpath "${targetpath}") - - set(_target_name ${_name}) - set(${_name}_TARGET_NAME "${_target_name}" PARENT_SCOPE) - - # Build the test. - add_executable(${_target_name} ${SOURCES}) - - list(APPEND LIBRARIES ${_boosttesttargets_libs}) - - if(LIBRARIES) - target_link_libraries(${_target_name} ${LIBRARIES}) - endif() - - if(RESOURCES) - set_property(TARGET ${_target_name} PROPERTY RESOURCE ${RESOURCES}) - copy_resources_to_build_tree(${_target_name}) - endif() - - if(NOT Boost_TEST_FLAGS) -# set(Boost_TEST_FLAGS --catch_system_error=yes --output_format=XML) - set(Boost_TEST_FLAGS --catch_system_error=yes) - endif() - - # TODO: Figure out why only recent boost handles individual test running properly - - if(LAUNCHER) - set(_test_command ${LAUNCHER} "\$") - else() - set(_test_command ${_target_name}) - endif() - - if(TESTS) - foreach(_test ${TESTS}) - add_test( - ${_name}-${_test} - ${_test_command} --run_test=${_test} ${Boost_TEST_FLAGS} - ) - if(FAIL_REGULAR_EXPRESSION) - set_tests_properties(${_name}-${_test} - PROPERTIES - FAIL_REGULAR_EXPRESSION - "${FAIL_REGULAR_EXPRESSION}") - endif() - endforeach() - else() - add_test( - ${_name}-boost_test - ${_test_command} ${Boost_TEST_FLAGS} - ) - if(FAIL_REGULAR_EXPRESSION) - set_tests_properties(${_name}-boost_test - PROPERTIES - FAIL_REGULAR_EXPRESSION - "${FAIL_REGULAR_EXPRESSION}") - endif() - endif() - - # CppCheck the test if we can. 
- if(COMMAND add_cppcheck) - add_cppcheck(${_target_name} STYLE UNUSED_FUNCTIONS) - endif() - - endif() -endfunction() diff --git a/tests/BoostTestTargetsDynamic.h b/tests/BoostTestTargetsDynamic.h deleted file mode 100644 index 4bff567..0000000 --- a/tests/BoostTestTargetsDynamic.h +++ /dev/null @@ -1,8 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF dynamic library - -#define BOOST_TEST_DYN_LINK -#include diff --git a/tests/BoostTestTargetsIncluded.h b/tests/BoostTestTargetsIncluded.h deleted file mode 100644 index 253133c..0000000 --- a/tests/BoostTestTargetsIncluded.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF included framework - -#include diff --git a/tests/BoostTestTargetsStatic.h b/tests/BoostTestTargetsStatic.h deleted file mode 100644 index dd3cdda..0000000 --- a/tests/BoostTestTargetsStatic.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. 
- -// Using the Boost UTF static library - -#include diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d4b29ed..76e0d13 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,61 +1,36 @@ -find_package(Gperftools QUIET) -if(GPERFTOOLS_FOUND) - message(STATUS "Found gperftools; compiling tests with TCMalloc") - list(APPEND PLATFORM_SPECIFIC_LIBS tcmalloc) +if (NOT BUILD_TESTING) + return() endif() include(CTest) -enable_testing() - -file(GLOB UNIT_TESTS "tests/*.cpp") -file(GLOB_RECURSE TEST_FIXTURES "include/*.hpp") - -include(BoostTestTargets.cmake) - -function(parse_unit_tests RESULT) - set(SOURCES) - foreach(_element ${ARGN}) - list(APPEND SOURCES "${_element}") - endforeach() - - set(tests) - - foreach(src ${SOURCES}) - file(READ ${src} thefile) - string(REGEX MATCH "BOOST_FIXTURE_TEST_SUITE\\([A-Za-z0-9_,<> ]*\\)" test_suite "${thefile}" ) - - if( NOT (test_suite STREQUAL "") ) - string(SUBSTRING "${test_suite}" 25 -1 test_suite) - string(FIND "${test_suite}" "," comma_loc ) - string(SUBSTRING "${test_suite}" 0 ${comma_loc} test_suite) - string(STRIP "${test_suite}" test_suite) - - string( REGEX MATCHALL "BOOST_AUTO_TEST_CASE\\([A-Za-z0-9_,<> ]*\\)" cases "${thefile}" ) - - foreach( test_case ${cases} ) - string(SUBSTRING "${test_case}" 21 -1 test_case) - string(FIND "${test_case}" ")" paren_loc ) - string(SUBSTRING "${test_case}" 0 ${paren_loc} test_case) - string(STRIP "${test_case}" test_case) - - list(APPEND tests "${test_suite}/${test_case}") - endforeach() - endif() - endforeach() - - set(${RESULT} ${tests} PARENT_SCOPE) -endfunction() - -parse_unit_tests(TEST_CASES ${UNIT_TESTS}) - -add_boost_test(koinos_state_db_tests - SOURCES ${UNIT_TESTS} ${TEST_FIXTURES} - TESTS ${TEST_CASES} -) - -target_link_libraries(koinos_state_db_tests Koinos::proto Koinos::crypto Koinos::state_db Koinos::log Koinos::util Koinos::exception ${PLATFORM_SPECIFIC_LIBS}) -target_include_directories(koinos_state_db_tests PUBLIC - $ - $ # /include -) 
+koinos_add_test( + state_db_tests + SOURCES + main.cpp + state_db_test.cpp) + +target_link_libraries( + state_db_tests + PRIVATE + state_db + Koinos::proto + Koinos::crypto + Koinos::log + Koinos::util + Koinos::exception) + +target_include_directories( + state_db_tests + PUBLIC + $ + $ + $) -# -# Requires CMake 2.6 or newer (uses the 'function' command) -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__copy_resources_to_build_tree) - return() -endif() -set(__copy_resources_to_build_tree YES) - -function(copy_resources_to_build_tree _target) - get_target_property(_resources ${_target} RESOURCE) - if(NOT _resources) - # Bail if no resources - message(STATUS - "Told to copy resources for target ${_target}, but " - "no resources are set!") - return() - endif() - - get_target_property(_path ${_target} LOCATION) - get_filename_component(_path "${_path}" PATH) - - if(NOT MSVC AND NOT "${CMAKE_GENERATOR}" MATCHES "Makefiles") - foreach(_config ${CMAKE_CONFIGURATION_TYPES}) - get_target_property(_path${_config} ${_target} LOCATION_${_config}) - get_filename_component(_path${_config} "${_path${_config}}" PATH) - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E make_directory "${_path${_config}}/" - COMMENT "Creating directory ${_path${_config}}/") - endforeach() - endif() - - foreach(_res ${_resources}) - if(NOT IS_ABSOLUTE "${_res}") - get_filename_component(_res "${_res}" ABSOLUTE) - endif() - get_filename_component(_name "${_res}" NAME) - - if(MSVC) - # Working dir is solution file dir, not exe file dir. 
- add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${CMAKE_BINARY_DIR}/" - COMMENT "Copying ${_name} to ${CMAKE_BINARY_DIR}/ for MSVC") - else() - if("${CMAKE_GENERATOR}" MATCHES "Makefiles") - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${_path}/" - COMMENT "Copying ${_name} to ${_path}/") - else() - foreach(_config ${CMAKE_CONFIGURATION_TYPES}) - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${_path${_config}}" - COMMENT "Copying ${_name} to ${_path${_config}}") - endforeach() - - endif() - endif() - endforeach() -endfunction() diff --git a/tests/GetForceIncludeDefinitions.cmake b/tests/GetForceIncludeDefinitions.cmake deleted file mode 100644 index efcca04..0000000 --- a/tests/GetForceIncludeDefinitions.cmake +++ /dev/null @@ -1,44 +0,0 @@ -# - Get the platform-appropriate flags to add to force inclusion of a file -# -# The most common use of this is to use a generated config.h-type file -# placed out of the source tree in all files. -# -# get_force_include_definitions(var forcedincludefiles...) - -# where var is the name of your desired output variable, and everything -# else is a source file to forcibly include. -# a list item to be filtered. -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__get_force_include_definitions) - return() -endif() -set(__get_force_include_definitions YES) - -function(get_force_include_definitions var) - set(_flagprefix) - if(CMAKE_COMPILER_IS_GNUCXX) - set(_flag "-include") - elseif(MSVC) - set(_flag "/FI") - else() - message(SEND_ERROR "You don't seem to be using MSVC or GCC, but") - message(SEND_ERROR "the project called get_force_include_definitions.") - message(SEND_ERROR "Contact this project with the name of your") - message(FATAL_ERROR "compiler and preferably the flag to force includes") - endif() - - set(_out) - foreach(_item ${ARGN}) - list(APPEND _out "${_flag} \"${_item}\"") - endforeach() - set(${var} "${_out}" PARENT_SCOPE) -endfunction() diff --git a/tests/main.cpp b/tests/main.cpp new file mode 100644 index 0000000..78f488d --- /dev/null +++ b/tests/main.cpp @@ -0,0 +1,3 @@ +#define BOOST_TEST_MODULE state_db_tests +#include +#include diff --git a/tests/state_db_test.cpp b/tests/state_db_test.cpp new file mode 100644 index 0000000..1bf61a4 --- /dev/null +++ b/tests/state_db_test.cpp @@ -0,0 +1,1919 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace koinos; +using namespace koinos::state_db; +using state_db::detail::merge_state; +using state_db::detail::state_delta; +using namespace std::string_literals; + +struct test_block +{ + std::string previous; + uint64_t height = 0; + uint64_t nonce = 0; + + crypto::multihash get_id() const; +}; + +crypto::multihash test_block::get_id() const +{ + return crypto::hash( crypto::multicodec::sha2_256, + util::converter::to< crypto::multihash >( previous ), + height, + nonce ); +} + +struct state_db_fixture +{ + state_db_fixture() + { + initialize_logging( "koinos_test", {}, "info" ); + + temp = 
std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); + std::filesystem::create_directory( temp ); + + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + fork_resolution_algorithm::fifo, + db.get_unique_lock() ); + } + + ~state_db_fixture() + { + boost::log::core::get()->remove_all_sinks(); + db.close( db.get_unique_lock() ); + std::filesystem::remove_all( temp ); + } + + database db; + std::filesystem::path temp; +}; + +BOOST_FIXTURE_TEST_SUITE( state_db_tests, state_db_fixture ) + +BOOST_AUTO_TEST_CASE( basic_test ) +{ + try + { + BOOST_TEST_MESSAGE( "Creating object" ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + auto shared_db_lock = db.get_shared_lock(); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + + // Object should not exist on older state node + BOOST_CHECK_EQUAL( db.get_root( shared_db_lock )->get_object( space, a_key ), nullptr ); + + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + BOOST_TEST_MESSAGE( "Modifying object" ); + + a_val = "alicia"; + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), 1 ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK( !state_2 ); + + db.finalize_node( state_1->id(), 
shared_db_lock ); + + BOOST_REQUIRE_THROW( state_1->put_object( space, a_key, &a_val ), node_finalized ); + + state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2 ); + a_val = "alex"; + BOOST_CHECK_EQUAL( state_2->put_object( space, a_key, &a_val ), -2 ); + + ptr = state_2->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alicia" ); + + BOOST_TEST_MESSAGE( "Erasing object" ); + state_2->remove_object( space, a_key ); + + BOOST_CHECK( !state_2->get_object( space, a_key ) ); + + db.discard_node( state_2->id(), shared_db_lock ); + state_2 = db.get_node( state_2->id(), shared_db_lock ); + BOOST_CHECK( !state_2 ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alicia" ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( fork_tests ) +{ + try + { + BOOST_TEST_MESSAGE( "Basic fork tests on state_db" ); + crypto::multihash id, prev_id, block_1000_id; + test_block b; + + auto shared_db_lock = db.get_shared_lock(); + + prev_id = db.get_root( shared_db_lock )->id(); + + for( uint64_t i = 1; i <= 2'000; ++i ) + { + b.previous = util::converter::as< std::string >( prev_id ); + b.height = i; + id = b.get_id(); + + auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK_EQUAL( b.height, new_block->revision() ); + db.finalize_node( id, shared_db_lock ); + + prev_id = id; + + if( i == 1'000 ) + block_1000_id = id; + } + + BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 0 ); + + BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2'000 ); + + 
BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->id() == block_1000_id ); + BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->revision() == 1'000 ); + + auto fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() ); + fork_heads.clear(); + + BOOST_TEST_MESSAGE( "Test commit" ); + shared_db_lock.reset(); + db.commit_node( block_1000_id, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == block_1000_id ); + BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 1'000 ); + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() ); + + crypto::multihash block_2000_id = id; + + BOOST_TEST_MESSAGE( "Test discard" ); + b.previous = util::converter::as< std::string >( db.get_head( shared_db_lock )->id() ); + b.height = db.get_head( shared_db_lock )->revision() + 1; + id = b.get_id(); + db.create_writable_node( util::converter::to< crypto::multihash >( b.previous ), + id, + protocol::block_header(), + shared_db_lock ); + auto new_block = db.get_node( id, shared_db_lock ); + BOOST_REQUIRE( new_block ); + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == prev_id ); + + db.discard_node( id, shared_db_lock ); + + BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2'000 ); + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == prev_id ); + + // Shared ptr should still exist, but not be returned with get_node + BOOST_REQUIRE( new_block ); + BOOST_REQUIRE( !db.get_node( id, shared_db_lock ) ); + 
new_block.reset(); + + // Cannot discard head + BOOST_REQUIRE_THROW( db.discard_node( prev_id, shared_db_lock ), cannot_discard ); + + BOOST_TEST_MESSAGE( "Check duplicate node creation" ); + BOOST_REQUIRE( !db.create_writable_node( db.get_head( shared_db_lock )->parent_id(), + db.get_head( shared_db_lock )->id(), + protocol::block_header(), + shared_db_lock ) ); + + BOOST_TEST_MESSAGE( "Check failed linking" ); + crypto::multihash zero = crypto::multihash::zero( crypto::multicodec::sha2_256 ); + BOOST_REQUIRE( !db.create_writable_node( zero, id, protocol::block_header(), shared_db_lock ) ); + + crypto::multihash head_id = db.get_head( shared_db_lock )->id(); + uint64_t head_rev = db.get_head( shared_db_lock )->revision(); + + BOOST_TEST_MESSAGE( "Test minority fork" ); + auto fork_node = db.get_node_at_revision( 1'995, shared_db_lock ); + prev_id = fork_node->id(); + b.nonce = 1; + + auto old_block_1996_id = db.get_node_at_revision( 1'996, shared_db_lock )->id(); + auto old_block_1997_id = db.get_node_at_revision( 1'997, shared_db_lock )->id(); + + for( uint64_t i = 1; i <= 5; ++i ) + { + b.previous = util::converter::as< std::string >( prev_id ); + b.height = fork_node->revision() + i; + id = b.get_id(); + + auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK_EQUAL( b.height, new_block->revision() ); + db.finalize_node( id, shared_db_lock ); + + BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); + + prev_id = id; + } + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() && fork_heads[ 1 ]->id() == id ) + || ( fork_heads[ 1 ]->id() == db.get_head( shared_db_lock )->id() && fork_heads[ 0 ]->id() == id ) ); + auto old_head_id = db.get_head( shared_db_lock )->id(); + + b.previous = 
util::converter::as< std::string >( prev_id ); + b.height = head_rev + 1; + id = b.get_id(); + + // When this node finalizes, it will be the longest path and should become head + new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK_EQUAL( b.height, new_block->revision() ); + + BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); + + db.finalize_node( id, shared_db_lock ); + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == id && fork_heads[ 1 ]->id() == old_head_id ) + || ( fork_heads[ 1 ]->id() == id && fork_heads[ 0 ]->id() == old_head_id ) ); + + BOOST_CHECK( db.get_head( shared_db_lock )->id() == id ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == b.height ); + + db.discard_node( old_block_1997_id, shared_db_lock ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == id && fork_heads[ 1 ]->id() == old_block_1996_id ) + || ( fork_heads[ 1 ]->id() == id && fork_heads[ 0 ]->id() == old_block_1996_id ) ); + + db.discard_node( old_block_1996_id, shared_db_lock ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == id ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( merge_iterator ) +{ + try + { + std::filesystem::path temp = std::filesystem::temp_directory_path() / koinos::util::random_alphanumeric( 8 ); + std::filesystem::create_directory( temp ); + + using state_delta_ptr = std::shared_ptr< state_delta >; + std::deque< state_delta_ptr > delta_queue; + delta_queue.emplace_back( std::make_shared< state_delta >( temp ) ); + + // alice: 1 + // bob: 2 + // charlie: 3 + delta_queue.back()->put( "alice", "1" ); + 
delta_queue.back()->put( "bob", "2" ); + delta_queue.back()->put( "charlie", "3" ); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "1" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "2" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "2" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "1" ); + } + + // alice: 4 + // bob: 5 + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "alice", "4" ); + delta_queue.back()->put( "bob", "5" ); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "5" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "5" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + } + + // alice: 
4 (not changed) + // bob: 6 + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "bob", "6" ); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + } + + // alice: (removed) + // bob: 6 (not changed) + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->erase( "alice" ); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + } + + // alice: 4 (restored) + // bob: 6 (not changed) + // charlie: 3 (not changed) + 
delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "alice", "4" ); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + } + + delta_queue.pop_front(); + delta_queue.pop_front(); + delta_queue.front()->commit(); + + { + merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + } + + while( delta_queue.size() > 1 ) + { + delta_queue.pop_front(); + delta_queue.front()->commit(); + + 
merge_state m_state( delta_queue.back() ); + auto itr = m_state.begin(); + + BOOST_REQUIRE( itr != m_state.end() ); + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + ++itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + ++itr; + BOOST_REQUIRE( itr == m_state.end() ); + BOOST_CHECK_THROW( *itr, koinos::exception ); + BOOST_CHECK_THROW( ++itr, koinos::exception ); + BOOST_CHECK_THROW( itr.key(), koinos::exception ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "charlie" ); + BOOST_CHECK_EQUAL( *itr, "3" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "bob" ); + BOOST_CHECK_EQUAL( *itr, "6" ); + --itr; + BOOST_CHECK_EQUAL( itr.key(), "alice" ); + BOOST_CHECK_EQUAL( *itr, "4" ); + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( reset_test ) +{ + try + { + BOOST_TEST_MESSAGE( "Creating object on transient state node" ); + + auto shared_db_lock = db.get_shared_lock(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + db.finalize_node( state_1->id(), shared_db_lock ); + + auto val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + + BOOST_TEST_MESSAGE( "Closing and opening database" ); + shared_db_lock.reset(); + state_1.reset(); + db.close( db.get_unique_lock() ); + + BOOST_CHECK_THROW( db.reset( db.get_unique_lock() ), 
koinos::exception ); + + shared_db_lock = db.get_shared_lock(); + BOOST_CHECK_THROW( db.get_node_at_revision( 1, shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_node_at_revision( 1, crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.get_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.create_writable_node( crypto::multihash::zero( crypto::multicodec::sha2_256 ), + crypto::hash( crypto::multicodec::sha2_256, 1 ), + protocol::block_header(), + shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.finalize_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.discard_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.get_head( shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_fork_heads( shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_root( shared_db_lock ), koinos::exception ); + shared_db_lock.reset(); + + BOOST_CHECK_THROW( db.commit_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), db.get_unique_lock() ), + koinos::exception ); + + db.open( + temp, + []( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + + // Object should not exist on persistent database (state node was not committed) + BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); + + BOOST_TEST_MESSAGE( "Creating object on committed state node" ); + + state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + 
BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + db.finalize_node( state_1->id(), shared_db_lock ); + auto state_1_id = state_1->id(); + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_1_id, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); + + BOOST_TEST_MESSAGE( "Closing and opening database" ); + shared_db_lock.reset(); + state_1.reset(); + db.close( db.get_unique_lock() ); + db.open( + temp, + []( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + + // State node was committed and should exist on open + shared_db_lock = db.get_shared_lock(); + val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 1 ); + + BOOST_TEST_MESSAGE( "Resetting database" ); + shared_db_lock.reset(); + db.reset( db.get_unique_lock() ); + + // Object should not exist on reset db + shared_db_lock = db.get_shared_lock(); + BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( anonymous_node_test ) +{ + try + { + BOOST_TEST_MESSAGE( "Creating object" ); + object_space space; + + auto shared_db_lock = db.get_shared_lock(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = 
db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + std::string a_key = "a"; + std::string a_val = "alice"; + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + BOOST_CHECK( state_1->put_object( space, a_key, &a_val ) == a_val.size() + key_size ); + + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + { + BOOST_TEST_MESSAGE( "Creating anonymous state node" ); + auto anon_state = state_1->create_anonymous_node(); + + BOOST_REQUIRE( anon_state->id() == state_1->id() ); + BOOST_REQUIRE( anon_state->revision() == state_1->revision() ); + BOOST_REQUIRE( anon_state->parent_id() == state_1->parent_id() ); + + BOOST_TEST_MESSAGE( "Modifying object" ); + a_val = "alicia"; + + BOOST_CHECK( anon_state->put_object( space, a_key, &a_val ) == 1 ); + + ptr = anon_state->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alice" ); + + BOOST_TEST_MESSAGE( "Deleting anonymous node" ); + } + + { + BOOST_TEST_MESSAGE( "Creating anonymous state node" ); + auto anon_state = state_1->create_anonymous_node(); + + BOOST_TEST_MESSAGE( "Modifying object" ); + + BOOST_CHECK( anon_state->put_object( space, a_key, &a_val ) == 1 ); + + ptr = anon_state->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alice" ); + + BOOST_TEST_MESSAGE( "Committing anonymous node" ); + anon_state->commit(); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + } + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + 
BOOST_CHECK_EQUAL( *ptr, a_val ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( merkle_root_test ) +{ + try + { + auto shared_db_lock = db.get_shared_lock(); + + auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1_id, + protocol::block_header(), + shared_db_lock ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "b"; + std::string b_val = "bob"; + std::string c_key = "c"; + std::string c_val = "charlie"; + + state_1->put_object( space, c_key, &c_val ); + state_1->put_object( space, b_key, &b_val ); + state_1->put_object( space, a_key, &a_val ); + + chain::database_key a_db_key; + *a_db_key.mutable_space() = space; + a_db_key.set_key( a_key ); + + chain::database_key b_db_key; + *b_db_key.mutable_space() = space; + b_db_key.set_key( b_key ); + + chain::database_key c_db_key; + *c_db_key.mutable_space() = space; + c_db_key.set_key( c_key ); + + std::vector< std::string > merkle_leafs; + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); + merkle_leafs.push_back( a_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); + merkle_leafs.push_back( b_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( c_db_key ) ); + merkle_leafs.push_back( c_val ); + + BOOST_CHECK_THROW( state_1->merkle_root(), koinos::exception ); + db.finalize_node( state_1_id, shared_db_lock ); + + auto merkle_root = + koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + BOOST_CHECK_EQUAL( merkle_root, state_1->merkle_root() ); + + auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); + + std::string d_key = "d"; + std::string d_val = 
"dave"; + a_val = "alicia"; + + state_2->put_object( space, a_key, &a_val ); + state_2->put_object( space, d_key, &d_val ); + state_2->remove_object( space, b_key ); + + chain::database_key d_db_key; + *d_db_key.mutable_space() = space; + d_db_key.set_key( d_key ); + + merkle_leafs.clear(); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); + merkle_leafs.push_back( a_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); + merkle_leafs.push_back( "" ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( d_db_key ) ); + merkle_leafs.push_back( d_val ); + + db.finalize_node( state_2_id, shared_db_lock ); + merkle_root = + koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + db.commit_node( state_2_id, db.get_unique_lock() ); + state_2 = db.get_node( state_2_id, db.get_shared_lock() ); + BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( get_delta_entries_test ) +{ + try + { + auto shared_db_lock = db.get_shared_lock(); + + auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1_id, + protocol::block_header(), + shared_db_lock ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "b"; + std::string b_val = "bob"; + std::string c_key = "c"; + std::string c_val = "charlie"; + + state_1->put_object( space, c_key, &c_val ); + state_1->put_object( space, b_key, &b_val ); + state_1->put_object( space, a_key, &a_val ); + + chain::database_key a_db_key; + *a_db_key.mutable_space() = space; + a_db_key.set_key( a_key ); + + chain::database_key b_db_key; + 
*b_db_key.mutable_space() = space; + b_db_key.set_key( b_key ); + + chain::database_key c_db_key; + *c_db_key.mutable_space() = space; + c_db_key.set_key( c_key ); + + auto entries = state_1->get_delta_entries(); + + BOOST_CHECK_EQUAL( 3, entries.size() ); + + BOOST_CHECK_EQUAL( a_key, entries[ 0 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 0 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( a_val, entries[ 0 ].value() ); + + BOOST_CHECK_EQUAL( b_key, entries[ 1 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 1 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( b_val, entries[ 1 ].value() ); + + BOOST_CHECK_EQUAL( c_key, entries[ 2 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 2 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( c_val, entries[ 2 ].value() ); + + db.finalize_node( state_1_id, shared_db_lock ); + + auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); + + std::string d_key = "d"; + std::string d_val = "dave"; + a_val = "alicia"; + + state_2->put_object( space, a_key, &a_val ); + state_2->put_object( space, d_key, &d_val ); + state_2->remove_object( space, b_key ); + + chain::database_key d_db_key; + *d_db_key.mutable_space() = space; + d_db_key.set_key( d_key ); + + auto entries2 = state_2->get_delta_entries(); + BOOST_CHECK_EQUAL( 3, entries2.size() ); + + BOOST_CHECK_EQUAL( a_key, entries2[ 0 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 0 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( a_val, entries2[ 0 ].value() ); + + BOOST_CHECK_EQUAL( b_key, entries2[ 1 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 1 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( false, entries2[ 1 ].has_value() ); // Deleted value + + BOOST_CHECK_EQUAL( d_key, entries2[ 2 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 2 
].object_space().DebugString() ); + BOOST_CHECK_EQUAL( d_val, entries2[ 2 ].value() ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( rocksdb_backend_test ) +{ + try + { + koinos::state_db::backends::rocksdb::rocksdb_backend backend; + auto temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); + + BOOST_REQUIRE_THROW( backend.open( temp ), koinos::exception ); + + BOOST_CHECK_THROW( backend.begin(), koinos::exception ); + BOOST_CHECK_THROW( backend.end(), koinos::exception ); + BOOST_CHECK_THROW( backend.put( "foo", "bar" ), koinos::exception ); + BOOST_CHECK_THROW( backend.get( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.erase( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.clear(), koinos::exception ); + BOOST_CHECK_THROW( backend.size(), koinos::exception ); + BOOST_CHECK_THROW( backend.empty(), koinos::exception ); + BOOST_CHECK_THROW( backend.find( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.lower_bound( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.flush(), koinos::exception ); + BOOST_CHECK( backend.revision() == 0 ); + BOOST_CHECK( backend.id() == koinos::crypto::multihash::zero( koinos::crypto::multicodec::sha2_256 ) ); + + std::filesystem::create_directory( temp ); + backend.open( temp ); + + auto itr = backend.begin(); + BOOST_CHECK( itr == backend.end() ); + + backend.put( "foo", "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + backend.put( "alice", "bob" ); + + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + ++itr; + BOOST_CHECK( *itr == "bar" ); + + ++itr; + BOOST_CHECK( itr == backend.end() ); + + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + itr = backend.lower_bound( "charlie" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + itr = backend.lower_bound( "foo" ); + 
BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + backend.put( "foo", "blob" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "blob" ); + + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + backend.erase( "foo" ); + + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + itr = backend.find( "foo" ); + BOOST_CHECK( itr == backend.end() ); + + backend.erase( "foo" ); + + backend.erase( "alice" ); + itr = backend.end(); + BOOST_CHECK( itr == backend.end() ); + + std::filesystem::remove_all( temp ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( rocksdb_object_cache_test ) +{ + try + { + std::size_t cache_size = 1'024; + koinos::state_db::backends::rocksdb::object_cache cache( cache_size ); + using value_type = koinos::state_db::backends::rocksdb::object_cache::value_type; + + std::string a_key = "a"; + std::string a_val = "alice"; + auto a_ptr = std::make_shared< const value_type >( a_val ); + + { + auto [ cache_hit, val ] = cache.get( a_key ); + BOOST_CHECK( !cache_hit ); + BOOST_CHECK( !val ); + } + + BOOST_CHECK( cache.put( a_key, a_ptr ) ); + + { + auto [ cache_hit, val_ptr ] = cache.get( a_key ); + BOOST_CHECK( cache_hit ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + } + + std::string b_key = "b"; + std::string b_val = "bob"; + auto b_ptr = std::make_shared< const value_type >( b_val ); + + cache.put( b_key, b_ptr ); + + { + auto [ cache_hit, val_ptr ] = cache.get( b_key ); + BOOST_CHECK( cache_hit ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, b_val ); + } + + // Will put 'a' first in the cache to evict 'b' + cache.get( a_key ); + + std::string fill_key = "f"; + std::string fill_val( cache_size - a_val.size() - b_val.size() + 1, 'f' ); + auto fill_ptr = std::make_shared< const value_type >( fill_val ); + BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); + + 
{ + auto [ cache_hit, val_ptr ] = cache.get( b_key ); + BOOST_CHECK( !cache_hit ); + BOOST_CHECK( !val_ptr ); + } + + { + auto [ cache_hit, val_ptr ] = cache.get( a_key ); + BOOST_CHECK( cache_hit ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + } + + BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); + { + auto [ cache_hit, val_ptr ] = cache.get( b_key ); + BOOST_CHECK( !cache_hit ); + BOOST_CHECK( !val_ptr ); + } + + std::string null_key = "n"; + std::shared_ptr< const value_type > null_ptr; + BOOST_CHECK( !cache.put( null_key, null_ptr ) ); + + { + auto [ cache_hit, val_ptr ] = cache.get( null_key ); + BOOST_CHECK( cache_hit ); + BOOST_REQUIRE( !val_ptr ); + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( map_backend_test ) +{ + try + { + koinos::state_db::backends::map::map_backend backend; + + auto itr = backend.begin(); + BOOST_CHECK( itr == backend.end() ); + + backend.put( "foo", "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + backend.put( "alice", "bob" ); + + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + ++itr; + BOOST_CHECK( *itr == "bar" ); + + ++itr; + BOOST_CHECK( itr == backend.end() ); + + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + itr = backend.lower_bound( "charlie" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + itr = backend.lower_bound( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); + + backend.put( "foo", "blob" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "blob" ); + + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + backend.erase( "foo" ); + + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); + + itr = backend.find( "foo" ); + BOOST_CHECK( itr == backend.end() 
); + + backend.erase( "foo" ); + + backend.erase( "alice" ); + itr = backend.end(); + BOOST_CHECK( itr == backend.end() ); + + backend.put( "foo", "bar" ); + BOOST_REQUIRE( backend.get( "foo" ) ); + BOOST_CHECK_EQUAL( *backend.get( "foo" ), "bar" ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( fork_resolution ) +{ + try + { + /** + * The final fork graph looks like the following: + * + * / state_1 (100) --- state_4 (110) + * / \ + * genesis --- state_2 (99) \ state_5 (110) + * \ + * \ state_3 (101) + */ + + BOOST_TEST_MESSAGE( "Test default FIFO fork resolution" ); + + auto shared_db_lock = db.get_shared_lock(); + auto genesis_id = db.get_head( shared_db_lock )->id(); + + protocol::block_header header; + header.set_timestamp( 100 ); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 101 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + auto state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + 
header.set_timestamp( 110 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + auto state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + auto state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + BOOST_TEST_MESSAGE( "Test block time fork resolution" ); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::block_time_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + header.set_timestamp( 100 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + state_id = 
crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 110 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + BOOST_TEST_MESSAGE( "Test pob fork resolution" ); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + std::string signer1 = "signer1"; + std::string signer2 = "signer2"; + std::string signer3 = "signer3"; + std::string signer4 = "signer4"; + std::string signer5 = "signer5"; + + // BEGIN: Mimic block time behavior (as long as signers are different) + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( 
db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + header.set_signer( signer2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 110 ); + header.set_signer( signer4 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + header.set_signer( signer5 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + // END: Mimic block time behavior (as long as signers are different) + + shared_db_lock.reset(); + state_1.reset(); + 
state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Create two forks, then double produce on the newer fork + + /** + * / state_3 (height: 2, time: 101, signer: signer3) <-- Double + * production + * / + * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102, signer: signer3) <-- Double + * production + * / + * genesis --- state_2 (height: 1, time: 99) <-- Resulting head + * + * + */ + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + header.set_signer( signer2 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + + 
header.set_timestamp( 102 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + /** + * Fork heads + * + * / state_3 (height: 2, time: 101) + * / + * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102) + * / + * genesis --- state_2 (height: 1, time: 99) + * + * + */ + + auto fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 3 ); + auto it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_2->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_3->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_4->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Create two forks, then double produce on the newer fork + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Create two forks, then double produce on the older fork + + /** + * Resulting head / state_3 (height: 2, time: 101, signer: signer3) <-- Double + * production V / / state_1 (height: 1, time: 99) --- state_4 (height: 2, time: 102, 
signer: + * signer3) <-- Double production + * / + * genesis --- state_2 (height: 1, time: 100) + * + * + */ + + header.set_timestamp( 99 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 100 ); + header.set_signer( signer2 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + + header.set_timestamp( 102 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + /** + * Fork heads + * + * / state_1 (height: 1, time: 99) + 
* / + * genesis --- state_2 (height: 1, time: 100) + * + * + */ + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 2 ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_1->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_2->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Create two forks, then double produce on the older fork + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Edge case when double production is the first block + + /** + * + * + * / state_1 (height: 1, time: 99, signer: signer1) <--- Double production + * / + * genesis --- state_2 (height: 1, time: 100, signer: signer1) <--- Double production + * + * + */ + + header.set_timestamp( 99 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() 
); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + + /** + * Fork heads + * + * genesis + * + */ + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 1 ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == genesis_id; + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Edge case when double production is the first block + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( restart_cache ) +{ + try + { + auto shared_db_lock = db.get_shared_lock(); + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + + state_1->put_object( space, a_key, &a_val ); + + { + auto [ ptr, key ] = state_1->get_next_object( space, std::string() ); + + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + BOOST_CHECK_EQUAL( key, a_key ); + } + + db.finalize_node( state_id, shared_db_lock ); + state_1.reset(); + shared_db_lock.reset(); + + db.commit_node( state_id, db.get_unique_lock() ); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + state_1 = db.get_root( shared_db_lock ); + { + auto [ ptr, key ] = state_1->get_next_object( space, std::string() ); + + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + BOOST_CHECK_EQUAL( key, a_key ); + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( persistence ) +{ + 
try + { + BOOST_TEST_MESSAGE( "Checking persistence when backed by rocksdb" ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + auto shared_db_lock = db.get_shared_lock(); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + + db.finalize_node( state_id, shared_db_lock ); + + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_id, db.get_unique_lock() ); + + db.close( db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.get_node( state_id, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.close( db.get_unique_lock() ); + + BOOST_TEST_MESSAGE( "Checking transience when backed by std::map" ); + db.open( + {}, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + + db.finalize_node( state_id, shared_db_lock ); + ptr = 
state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_id, db.get_unique_lock() ); + + db.close( db.get_unique_lock() ); + db.open( + {}, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.get_node( state_id, shared_db_lock ); + BOOST_REQUIRE( !state_1 ); + + ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( !ptr ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( clone_node ) +{ + try + { + BOOST_TEST_MESSAGE( "Check clone of un-finalized node" ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "bob"; + std::string b_val = "bob"; + std::string c_key = "charlie"; + std::string c_val = "charlie"; + std::string d_key = "dave"; + std::string d_val = "dave"; + + auto shared_db_lock = db.get_shared_lock(); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + + crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); + auto state_1a = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1a_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1a ); + state_1a->put_object( space, a_key, &a_val ); + state_1a->put_object( space, b_key, &b_val ); + db.finalize_node( state_1a_id, shared_db_lock ); + + crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); + auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2a ); + state_2a->put_object( space, c_key, &c_val ); + state_2a->remove_object( space, a_key ); + + crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); + auto state_2b = db.clone_node( state_2a_id, 
state_2b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2b ); + BOOST_CHECK( !state_2b->is_finalized() ); + BOOST_CHECK( !state_2b->get_object( space, a_key ) ); + BOOST_REQUIRE( state_2b->get_object( space, b_key ) ); + BOOST_CHECK_EQUAL( *state_2b->get_object( space, b_key ), b_val ); + BOOST_REQUIRE( state_2b->get_object( space, c_key ) ); + BOOST_CHECK_EQUAL( *state_2b->get_object( space, c_key ), c_val ); + + state_2b->remove_object( space, b_key ); + state_2b->put_object( space, d_key, &d_val ); + + BOOST_REQUIRE( state_2a->get_object( space, b_key ) ); + BOOST_CHECK_EQUAL( *state_2a->get_object( space, b_key ), b_val ); + BOOST_CHECK( !state_2a->get_object( space, d_key ) ); + + BOOST_TEST_MESSAGE( "Checking clone of a finalized node" ); + + crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); + BOOST_REQUIRE_THROW( db.clone_node( state_1a_id, state_1b_id, protocol::block_header(), shared_db_lock ), + illegal_argument ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_CASE( get_all_nodes ) +{ + try + { + BOOST_TEST_MESSAGE( "Create state nodes" ); + + auto shared_db_lock = db.get_shared_lock(); + auto root_id = db.get_root( shared_db_lock )->id(); + + crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); + auto state_1a = db.create_writable_node( root_id, state_1a_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_1a ); + db.finalize_node( state_1a_id, shared_db_lock ); + + crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); + auto state_1b = db.create_writable_node( root_id, state_1b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_1b ); + + crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); + auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2a ); + + crypto::multihash 
state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); + auto state_2b = db.create_writable_node( state_1a_id, state_2b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2b ); + + BOOST_TEST_MESSAGE( "Check all state nodes" ); + + auto nodes = db.get_all_nodes( shared_db_lock ); + BOOST_REQUIRE_EQUAL( nodes.size(), 5 ); + BOOST_CHECK( nodes[ 0 ]->id() == root_id ); + BOOST_CHECK( nodes[ 1 ]->id() == state_1b_id ); + BOOST_CHECK( nodes[ 2 ]->id() == state_2a_id ); + BOOST_CHECK( nodes[ 3 ]->id() == state_1a_id ); + BOOST_CHECK( nodes[ 4 ]->id() == state_2b_id ); + + BOOST_TEST_MESSAGE( "Commit 1a" ); + + nodes.clear(); + state_1a.reset(); + state_1b.reset(); + state_2a.reset(); + state_2b.reset(); + shared_db_lock.reset(); + + auto unique_db_lock = db.get_unique_lock(); + db.commit_node( state_1a_id, unique_db_lock ); + + BOOST_TEST_MESSAGE( "Check all state nodes" ); + + nodes = db.get_all_nodes( unique_db_lock ); + BOOST_REQUIRE_EQUAL( nodes.size(), 3 ); + BOOST_CHECK( nodes[ 0 ]->id() == state_2a_id ); + BOOST_CHECK( nodes[ 1 ]->id() == state_1a_id ); + BOOST_CHECK( nodes[ 2 ]->id() == state_2b_id ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/main.cpp b/tests/tests/main.cpp deleted file mode 100644 index b9ec7ab..0000000 --- a/tests/tests/main.cpp +++ /dev/null @@ -1,3 +0,0 @@ -#define BOOST_TEST_MODULE koinos_state_db_tests -#include -#include diff --git a/tests/tests/state_db_test.cpp b/tests/tests/state_db_test.cpp deleted file mode 100644 index ba406ef..0000000 --- a/tests/tests/state_db_test.cpp +++ /dev/null @@ -1,1742 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -using namespace koinos; -using namespace koinos::state_db; -using state_db::detail::merge_state; -using state_db::detail::state_delta; -using namespace 
std::string_literals; - -struct test_block -{ - std::string previous; - uint64_t height = 0; - uint64_t nonce = 0; - - crypto::multihash get_id() const; -}; - -crypto::multihash test_block::get_id() const -{ - return crypto::hash( crypto::multicodec::sha2_256, util::converter::to< crypto::multihash >( previous ), height, nonce ); -} - -struct state_db_fixture -{ - state_db_fixture() - { - initialize_logging( "koinos_test", {}, "info" ); - - temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); - std::filesystem::create_directory( temp ); - - db.open( temp, [&]( state_db::state_node_ptr root ){}, fork_resolution_algorithm::fifo, db.get_unique_lock() ); - } - - ~state_db_fixture() - { - boost::log::core::get()->remove_all_sinks(); - db.close( db.get_unique_lock() ); - std::filesystem::remove_all( temp ); - } - - database db; - std::filesystem::path temp; -}; - -BOOST_FIXTURE_TEST_SUITE( state_db_tests, state_db_fixture ) - -BOOST_AUTO_TEST_CASE( basic_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object" ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - auto shared_db_lock = db.get_shared_lock(); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - - // Object should not exist on older state node - BOOST_CHECK_EQUAL( db.get_root( shared_db_lock )->get_object( space, a_key ), nullptr ); - - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - BOOST_TEST_MESSAGE( "Modifying object" ); - - a_val = "alicia"; 
- BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), 1 ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK( !state_2 ); - - db.finalize_node( state_1->id(), shared_db_lock ); - - BOOST_REQUIRE_THROW( state_1->put_object( space, a_key, &a_val ), node_finalized ); - - state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2 ); - a_val = "alex"; - BOOST_CHECK_EQUAL( state_2->put_object( space, a_key, &a_val ), -2 ); - - ptr = state_2->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, "alicia" ); - - BOOST_TEST_MESSAGE( "Erasing object" ); - state_2->remove_object( space, a_key ); - - BOOST_CHECK( !state_2->get_object( space, a_key ) ); - - db.discard_node( state_2->id(), shared_db_lock ); - state_2 = db.get_node( state_2->id(), shared_db_lock ); - BOOST_CHECK( !state_2 ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, "alicia" ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( fork_tests ) -{ try { - BOOST_TEST_MESSAGE( "Basic fork tests on state_db" ); - crypto::multihash id, prev_id, block_1000_id; - test_block b; - - auto shared_db_lock = db.get_shared_lock(); - - prev_id = db.get_root( shared_db_lock )->id(); - - for( uint64_t i = 1; i <= 2000; ++i ) - { - b.previous = util::converter::as< std::string >( prev_id ); - b.height = i; - id = b.get_id(); - - auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( b.height, new_block->revision() ); - db.finalize_node( id, 
shared_db_lock ); - - prev_id = id; - - if( i == 1000 ) block_1000_id = id; - } - - BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); - BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 0 ); - - BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2000 ); - - BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->id() == block_1000_id ); - BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->revision() == 1000 ); - - auto fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() ); - fork_heads.clear(); - - BOOST_TEST_MESSAGE( "Test commit" ); - shared_db_lock.reset(); - db.commit_node( block_1000_id, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == block_1000_id ); - BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 1000 ); - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() ); - - crypto::multihash block_2000_id = id; - - BOOST_TEST_MESSAGE( "Test discard" ); - b.previous = util::converter::as< std::string >( db.get_head( shared_db_lock )->id() ); - b.height = db.get_head( shared_db_lock )->revision() + 1; - id = b.get_id(); - db.create_writable_node( util::converter::to< crypto::multihash >( b.previous ), id, protocol::block_header(), shared_db_lock ); - auto new_block = db.get_node( id, shared_db_lock ); - BOOST_REQUIRE( new_block ); - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == prev_id ); - - db.discard_node( id, shared_db_lock ); - - BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == 
prev_id ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2000 ); - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == prev_id ); - - // Shared ptr should still exist, but not be returned with get_node - BOOST_REQUIRE( new_block ); - BOOST_REQUIRE( !db.get_node( id, shared_db_lock ) ); - new_block.reset(); - - // Cannot discard head - BOOST_REQUIRE_THROW( db.discard_node( prev_id, shared_db_lock ), cannot_discard ); - - BOOST_TEST_MESSAGE( "Check duplicate node creation" ); - BOOST_REQUIRE( !db.create_writable_node( db.get_head( shared_db_lock )->parent_id(), db.get_head( shared_db_lock )->id(), protocol::block_header(), shared_db_lock ) ); - - BOOST_TEST_MESSAGE( "Check failed linking" ); - crypto::multihash zero = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - BOOST_REQUIRE( !db.create_writable_node( zero, id, protocol::block_header(), shared_db_lock ) ); - - crypto::multihash head_id = db.get_head( shared_db_lock )->id(); - uint64_t head_rev = db.get_head( shared_db_lock )->revision(); - - BOOST_TEST_MESSAGE( "Test minority fork" ); - auto fork_node = db.get_node_at_revision( 1995, shared_db_lock ); - prev_id = fork_node->id(); - b.nonce = 1; - - auto old_block_1996_id = db.get_node_at_revision( 1996, shared_db_lock )->id(); - auto old_block_1997_id = db.get_node_at_revision( 1997, shared_db_lock )->id(); - - for ( uint64_t i = 1; i <= 5; ++i ) - { - b.previous = util::converter::as< std::string >( prev_id ); - b.height = fork_node->revision() + i; - id = b.get_id(); - - auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( b.height, new_block->revision() ); - db.finalize_node( id, shared_db_lock ); - - BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); - - prev_id = id; - } - - fork_heads = 
db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() && fork_heads[1]->id() == id ) || - ( fork_heads[1]->id() == db.get_head( shared_db_lock )->id() && fork_heads[0]->id() == id ) ); - auto old_head_id = db.get_head( shared_db_lock )->id(); - - b.previous = util::converter::as< std::string >( prev_id ); - b.height = head_rev + 1; - id = b.get_id(); - - // When this node finalizes, it will be the longest path and should become head - new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( b.height, new_block->revision() ); - - BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); - - db.finalize_node( id, shared_db_lock ); - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == id && fork_heads[1]->id() == old_head_id ) || - ( fork_heads[1]->id() == id && fork_heads[0]->id() == old_head_id ) ); - - BOOST_CHECK( db.get_head( shared_db_lock )->id() == id ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == b.height ); - - db.discard_node( old_block_1997_id, shared_db_lock ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == id && fork_heads[1]->id() == old_block_1996_id ) || - ( fork_heads[1]->id() == id && fork_heads[0]->id() == old_block_1996_id ) ); - - db.discard_node( old_block_1996_id, shared_db_lock ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == id ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( merge_iterator ) -{ try { - std::filesystem::path temp = std::filesystem::temp_directory_path() / 
koinos::util::random_alphanumeric( 8 ); - std::filesystem::create_directory( temp ); - - using state_delta_ptr = std::shared_ptr< state_delta >; - std::deque< state_delta_ptr > delta_queue; - delta_queue.emplace_back( std::make_shared< state_delta >( temp ) ); - - // alice: 1 - // bob: 2 - // charlie: 3 - delta_queue.back()->put( "alice", "1" ); - delta_queue.back()->put( "bob", "2" ); - delta_queue.back()->put( "charlie", "3" ); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "1" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "2" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( ++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "2" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "1" ); - } - - - // alice: 4 - // bob: 5 - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "alice", "4" ); - delta_queue.back()->put( "bob", "5" ); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "5" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( 
++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "5" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - } - - // alice: 4 (not changed) - // bob: 6 - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "bob", "6" ); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( ++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - } - - // alice: (removed) - // bob: 6 (not changed) - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->erase( "alice" ); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( 
++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - } - - // alice: 4 (restored) - // bob: 6 (not changed) - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "alice", "4" ); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( ++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - } - - delta_queue.pop_front(); - delta_queue.pop_front(); - delta_queue.front()->commit(); - - { - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( ++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( 
itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - } - - while( delta_queue.size() > 1 ) - { - delta_queue.pop_front(); - delta_queue.front()->commit(); - - merge_state m_state( delta_queue.back() ); - auto itr = m_state.begin(); - - BOOST_REQUIRE( itr != m_state.end() ); - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - ++itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - ++itr; - BOOST_REQUIRE( itr == m_state.end() ); - BOOST_CHECK_THROW( *itr, koinos::exception ); - BOOST_CHECK_THROW( ++itr, koinos::exception ); - BOOST_CHECK_THROW( itr.key(), koinos::exception ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "charlie" ); - BOOST_CHECK_EQUAL( *itr, "3" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "bob" ); - BOOST_CHECK_EQUAL( *itr, "6" ); - --itr; - BOOST_CHECK_EQUAL( itr.key(), "alice" ); - BOOST_CHECK_EQUAL( *itr, "4" ); - } -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( reset_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object on transient state node" ); - - auto shared_db_lock = db.get_shared_lock(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - db.finalize_node( state_1->id(), shared_db_lock ); - - auto val_ptr = 
db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - - BOOST_TEST_MESSAGE( "Closing and opening database" ); - shared_db_lock.reset(); - state_1.reset(); - db.close( db.get_unique_lock() ); - - BOOST_CHECK_THROW( db.reset( db.get_unique_lock() ), koinos::exception ); - - shared_db_lock = db.get_shared_lock(); - BOOST_CHECK_THROW( db.get_node_at_revision( 1, shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_node_at_revision( 1, crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.create_writable_node( crypto::multihash::zero( crypto::multicodec::sha2_256 ), crypto::hash( crypto::multicodec::sha2_256, 1 ), protocol::block_header(), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.finalize_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.discard_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_head( shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_fork_heads( shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_root( shared_db_lock ), koinos::exception ); - shared_db_lock.reset(); - - BOOST_CHECK_THROW( db.commit_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), db.get_unique_lock() ), koinos::exception ); - - db.open( temp, []( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - - // Object should not exist on persistent database (state node was not committed) - BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( 
crypto::multicodec::sha2_256 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); - - BOOST_TEST_MESSAGE( "Creating object on committed state node" ); - - state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - db.finalize_node( state_1->id(), shared_db_lock ); - auto state_1_id = state_1->id(); - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_1_id, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); - - BOOST_TEST_MESSAGE( "Closing and opening database" ); - shared_db_lock.reset(); - state_1.reset(); - db.close( db.get_unique_lock() ); - db.open( temp, []( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - // State node was committed and should exist on open - shared_db_lock = db.get_shared_lock(); - val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 1 ); - - BOOST_TEST_MESSAGE( "Resetting database" ); - shared_db_lock.reset(); - db.reset( db.get_unique_lock() ); - - // Object should not exist on reset db - shared_db_lock = db.get_shared_lock(); - BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); -} 
KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( anonymous_node_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object" ); - object_space space; - - auto shared_db_lock = db.get_shared_lock(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - std::string a_key = "a"; - std::string a_val = "alice"; - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - BOOST_CHECK( state_1->put_object( space, a_key, &a_val ) == a_val.size() + key_size ); - - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - { - BOOST_TEST_MESSAGE( "Creating anonymous state node" ); - auto anon_state = state_1->create_anonymous_node(); - - BOOST_REQUIRE( anon_state->id() == state_1->id() ); - BOOST_REQUIRE( anon_state->revision() == state_1->revision() ); - BOOST_REQUIRE( anon_state->parent_id() == state_1->parent_id() ); - - BOOST_TEST_MESSAGE( "Modifying object" ); - a_val = "alicia"; - - BOOST_CHECK( anon_state->put_object( space, a_key, &a_val ) == 1 ); - - ptr = anon_state->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, "alice" ); - - BOOST_TEST_MESSAGE( "Deleting anonymous node" ); - } - - { - BOOST_TEST_MESSAGE( "Creating anonymous state node" ); - auto anon_state = state_1->create_anonymous_node(); - - BOOST_TEST_MESSAGE( "Modifying object" ); - - BOOST_CHECK( anon_state->put_object( space, a_key, &a_val ) == 1 ); - - ptr = anon_state->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - 
BOOST_CHECK_EQUAL( *ptr, "alice" ); - - BOOST_TEST_MESSAGE( "Committing anonymous node" ); - anon_state->commit(); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - } - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( merkle_root_test ) -{ try { - auto shared_db_lock = db.get_shared_lock(); - - auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_1_id, protocol::block_header(), shared_db_lock ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - std::string b_key = "b"; - std::string b_val = "bob"; - std::string c_key = "c"; - std::string c_val = "charlie"; - - state_1->put_object( space, c_key, &c_val ); - state_1->put_object( space, b_key, &b_val ); - state_1->put_object( space, a_key, &a_val ); - - chain::database_key a_db_key; - *a_db_key.mutable_space() = space; - a_db_key.set_key( a_key ); - - chain::database_key b_db_key; - *b_db_key.mutable_space() = space; - b_db_key.set_key( b_key ); - - chain::database_key c_db_key; - *c_db_key.mutable_space() = space; - c_db_key.set_key( c_key ); - - std::vector< std::string > merkle_leafs; - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); - merkle_leafs.push_back( a_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); - merkle_leafs.push_back( b_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( c_db_key ) ); - merkle_leafs.push_back( c_val ); - - BOOST_CHECK_THROW( state_1->merkle_root(), koinos::exception ); - db.finalize_node( state_1_id, shared_db_lock ); - - auto merkle_root = koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - 
BOOST_CHECK_EQUAL( merkle_root, state_1->merkle_root() ); - - auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); - - std::string d_key = "d"; - std::string d_val = "dave"; - a_val = "alicia"; - - state_2->put_object( space, a_key, &a_val ); - state_2->put_object( space, d_key, &d_val ); - state_2->remove_object( space, b_key ); - - chain::database_key d_db_key; - *d_db_key.mutable_space() = space; - d_db_key.set_key( d_key ); - - merkle_leafs.clear(); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); - merkle_leafs.push_back( a_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); - merkle_leafs.push_back( "" ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( d_db_key ) ); - merkle_leafs.push_back( d_val ); - - db.finalize_node( state_2_id, shared_db_lock ); - merkle_root = koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - db.commit_node( state_2_id, db.get_unique_lock() ); - state_2 = db.get_node( state_2_id, db.get_shared_lock() ); - BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( get_delta_entries_test ) -{ try { - auto shared_db_lock = db.get_shared_lock(); - - auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_1_id, protocol::block_header(), shared_db_lock ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - std::string b_key = "b"; - std::string b_val = "bob"; - std::string c_key = "c"; - std::string c_val = "charlie"; - - 
state_1->put_object( space, c_key, &c_val ); - state_1->put_object( space, b_key, &b_val ); - state_1->put_object( space, a_key, &a_val ); - - chain::database_key a_db_key; - *a_db_key.mutable_space() = space; - a_db_key.set_key( a_key ); - - chain::database_key b_db_key; - *b_db_key.mutable_space() = space; - b_db_key.set_key( b_key ); - - chain::database_key c_db_key; - *c_db_key.mutable_space() = space; - c_db_key.set_key( c_key ); - - auto entries = state_1->get_delta_entries(); - - BOOST_CHECK_EQUAL( 3, entries.size() ); - - BOOST_CHECK_EQUAL( a_key, entries[0].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[0].object_space().DebugString() ); - BOOST_CHECK_EQUAL( a_val, entries[0].value() ); - - BOOST_CHECK_EQUAL( b_key, entries[1].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[1].object_space().DebugString() ); - BOOST_CHECK_EQUAL( b_val, entries[1].value() ); - - BOOST_CHECK_EQUAL( c_key, entries[2].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[2].object_space().DebugString() ); - BOOST_CHECK_EQUAL( c_val, entries[2].value() ); - - db.finalize_node( state_1_id, shared_db_lock ); - - auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); - - std::string d_key = "d"; - std::string d_val = "dave"; - a_val = "alicia"; - - state_2->put_object( space, a_key, &a_val ); - state_2->put_object( space, d_key, &d_val ); - state_2->remove_object( space, b_key ); - - chain::database_key d_db_key; - *d_db_key.mutable_space() = space; - d_db_key.set_key( d_key ); - - auto entries2 = state_2->get_delta_entries(); - BOOST_CHECK_EQUAL( 3, entries.size() ); - - BOOST_CHECK_EQUAL( a_key, entries2[0].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries2[0].object_space().DebugString() ); - BOOST_CHECK_EQUAL( a_val, entries2[0].value() ); - - BOOST_CHECK_EQUAL( b_key, entries2[1].key() ); - BOOST_CHECK_EQUAL( 
space.DebugString(), entries2[1].object_space().DebugString() ); - BOOST_CHECK_EQUAL( false, entries2[1].has_value() ); // Deleted value - - BOOST_CHECK_EQUAL( d_key, entries2[2].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries2[2].object_space().DebugString() ); - BOOST_CHECK_EQUAL( d_val, entries2[2].value() ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( rocksdb_backend_test ) -{ try { - koinos::state_db::backends::rocksdb::rocksdb_backend backend; - auto temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); - - BOOST_REQUIRE_THROW( backend.open( temp ), koinos::exception ); - - BOOST_CHECK_THROW( backend.begin(), koinos::exception ); - BOOST_CHECK_THROW( backend.end(), koinos::exception ); - BOOST_CHECK_THROW( backend.put( "foo", "bar" ), koinos::exception ); - BOOST_CHECK_THROW( backend.get( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.erase( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.clear(), koinos::exception ); - BOOST_CHECK_THROW( backend.size(), koinos::exception ); - BOOST_CHECK_THROW( backend.empty(), koinos::exception ); - BOOST_CHECK_THROW( backend.find( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.lower_bound( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.flush(), koinos::exception ); - BOOST_CHECK( backend.revision() == 0 ); - BOOST_CHECK( backend.id() == koinos::crypto::multihash::zero( koinos::crypto::multicodec::sha2_256 ) ); - - std::filesystem::create_directory( temp ); - backend.open( temp ); - - auto itr = backend.begin(); - BOOST_CHECK( itr == backend.end() ); - - backend.put( "foo", "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - backend.put( "alice", "bob" ); - - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); - - ++itr; - BOOST_CHECK( *itr == "bar" ); - - ++itr; - BOOST_CHECK( itr == backend.end() ); - - --itr; - 
BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - itr = backend.lower_bound( "charlie" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - itr = backend.lower_bound( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - backend.put( "foo", "blob" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "blob" ); - - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); - - backend.erase( "foo" ); - - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); - - itr = backend.find( "foo" ); - BOOST_CHECK( itr == backend.end() ); - - backend.erase( "foo" ); - - backend.erase( "alice" ); - itr = backend.end(); - BOOST_CHECK( itr == backend.end() ); - - std::filesystem::remove_all( temp ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( rocksdb_object_cache_test ) -{ try { - std::size_t cache_size = 1024; - koinos::state_db::backends::rocksdb::object_cache cache( cache_size ); - using value_type = koinos::state_db::backends::rocksdb::object_cache::value_type; - - std::string a_key = "a"; - std::string a_val = "alice"; - auto a_ptr = std::make_shared< const value_type >( a_val ); - - { - auto [cache_hit, val] = cache.get( a_key ); - BOOST_CHECK( !cache_hit ); - BOOST_CHECK( !val ); - } - - BOOST_CHECK( cache.put( a_key, a_ptr ) ); - - { - auto [ cache_hit, val_ptr ] = cache.get( a_key ); - BOOST_CHECK( cache_hit ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - } - - std::string b_key = "b"; - std::string b_val = "bob"; - auto b_ptr = std::make_shared< const value_type >( b_val ); - - cache.put( b_key, b_ptr ); - - { - auto [ cache_hit, val_ptr ] = cache.get( b_key ); - BOOST_CHECK( cache_hit ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, b_val ); - } - - // Will put 'a' first in the cache to evict 'b' - cache.get( a_key ); - - std::string 
fill_key = "f"; - std::string fill_val( cache_size - a_val.size() - b_val.size() + 1, 'f' ); - auto fill_ptr = std::make_shared< const value_type >( fill_val ); - BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); - - { - auto [ cache_hit, val_ptr ] = cache.get( b_key ); - BOOST_CHECK( !cache_hit ); - BOOST_CHECK( !val_ptr ); - } - - { - auto [ cache_hit, val_ptr ] = cache.get( a_key ); - BOOST_CHECK( cache_hit ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - } - - BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); - { - auto [ cache_hit, val_ptr ] = cache.get( b_key ); - BOOST_CHECK( !cache_hit ); - BOOST_CHECK( !val_ptr ); - } - - std::string null_key = "n"; - std::shared_ptr< const value_type > null_ptr; - BOOST_CHECK( !cache.put( null_key, null_ptr ) ); - - { - auto [ cache_hit, val_ptr ] = cache.get( null_key ); - BOOST_CHECK( cache_hit ); - BOOST_REQUIRE( !val_ptr ); - } - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( map_backend_test ) -{ try { - koinos::state_db::backends::map::map_backend backend; - - auto itr = backend.begin(); - BOOST_CHECK( itr == backend.end() ); - - backend.put( "foo", "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - backend.put( "alice", "bob" ); - - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); - - ++itr; - BOOST_CHECK( *itr == "bar" ); - - ++itr; - BOOST_CHECK( itr == backend.end() ); - - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - itr = backend.lower_bound( "charlie" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - itr = backend.lower_bound( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); - - backend.put( "foo", "blob" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "blob" ); - - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr 
== "bob" ); - - backend.erase( "foo" ); - - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); - - itr = backend.find( "foo" ); - BOOST_CHECK( itr == backend.end() ); - - backend.erase( "foo" ); - - backend.erase( "alice" ); - itr = backend.end(); - BOOST_CHECK( itr == backend.end() ); - - backend.put( "foo", "bar" ); - BOOST_REQUIRE( backend.get( "foo" ) ); - BOOST_CHECK_EQUAL( *backend.get( "foo" ), "bar" ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( fork_resolution ) -{ try { - /** - * The final fork graph looks like the following: - * - * / state_1 (100) --- state_4 (110) - * / \ - * genesis --- state_2 (99) \ state_5 (110) - * \ - * \ state_3 (101) - */ - - BOOST_TEST_MESSAGE( "Test default FIFO fork resolution" ); - - auto shared_db_lock = db.get_shared_lock(); - auto genesis_id = db.get_head( shared_db_lock )->id(); - - protocol::block_header header; - header.set_timestamp( 100 ); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 101 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - auto state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - 
BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 110 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - auto state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - auto state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - BOOST_TEST_MESSAGE( "Test block time fork resolution" ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::block_time_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - header.set_timestamp( 100 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == 
state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock)->id() == state_2->id() ); - - header.set_timestamp( 110 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - BOOST_TEST_MESSAGE( "Test pob fork resolution" ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - std::string signer1 = "signer1"; - std::string signer2 = "signer2"; - std::string signer3 = "signer3"; - std::string signer4 = "signer4"; - std::string signer5 = "signer5"; - - // BEGIN: Mimic block time behavior (as long as signers are different) - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - state_id = crypto::hash( 
crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - header.set_signer( signer2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 110 ); - header.set_signer( signer4 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - header.set_signer( signer5 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( 
shared_db_lock )->id() == state_4->id() ); - - // END: Mimic block time behavior (as long as signers are different) - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Create two forks, then double produce on the newer fork - - /** - * / state_3 (height: 2, time: 101, signer: signer3) <-- Double production - * / - * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102, signer: signer3) <-- Double production - * / - * genesis --- state_2 (height: 1, time: 99) <-- Resulting head - * - * - */ - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - header.set_signer( signer2 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == 
state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - - header.set_timestamp( 102 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock)->id() == state_3->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - /** - * Fork heads - * - * / state_3 (height: 2, time: 101) - * / - * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102) - * / - * genesis --- state_2 (height: 1, time: 99) - * - * - */ - - auto fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 3 ); - auto it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_2->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_3->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_4->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Create two forks, then double produce on the newer fork - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Create two forks, then double produce on the older fork - - /** - * Resulting head / state_3 (height: 2, time: 101, signer: signer3) <-- 
Double production - * V / - * / state_1 (height: 1, time: 99) --- state_4 (height: 2, time: 102, signer: signer3) <-- Double production - * / - * genesis --- state_2 (height: 1, time: 100) - * - * - */ - - header.set_timestamp( 99 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 100 ); - header.set_signer( signer2 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - - header.set_timestamp( 102 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock 
)->id() == state_1->id() ); - - /** - * Fork heads - * - * / state_1 (height: 1, time: 99) - * / - * genesis --- state_2 (height: 1, time: 100) - * - * - */ - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 2 ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_1->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_2->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Create two forks, then double produce on the older fork - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Edge case when double production is the first block - - /** - * - * - * / state_1 (height: 1, time: 99, signer: signer1) <--- Double production - * / - * genesis --- state_2 (height: 1, time: 100, signer: signer1) <--- Double production - * - * - */ - - header.set_timestamp( 99 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - 
BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - - /** - * Fork heads - * - * genesis - * - */ - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 1 ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == genesis_id; } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Edge case when double production is the first block - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( restart_cache ) -{ try { - - auto shared_db_lock = db.get_shared_lock(); - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - - state_1->put_object( space, a_key, &a_val ); - - { - auto [ptr, key] = state_1->get_next_object( space, std::string() ); - - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - BOOST_CHECK_EQUAL( key, a_key ); - } - - db.finalize_node( state_id, shared_db_lock ); - state_1.reset(); - shared_db_lock.reset(); - - db.commit_node( state_id, db.get_unique_lock() ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - state_1 = db.get_root( shared_db_lock ); - { - auto [ptr, key] = state_1->get_next_object( space, std::string() ); - - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - BOOST_CHECK_EQUAL( key, a_key ); - } - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - 
-BOOST_AUTO_TEST_CASE( persistence ) -{ try { - - BOOST_TEST_MESSAGE( "Checking persistence when backed by rocksdb" ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - auto shared_db_lock = db.get_shared_lock(); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - - db.finalize_node( state_id, shared_db_lock ); - - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_id, db.get_unique_lock() ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.get_node( state_id, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.close( db.get_unique_lock() ); - - BOOST_TEST_MESSAGE( "Checking transience when backed by std::map" ); - db.open( {}, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - - db.finalize_node( state_id, shared_db_lock ); - ptr = 
state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_id, db.get_unique_lock() ); - - db.close( db.get_unique_lock() ); - db.open( {}, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.get_node( state_id, shared_db_lock ); - BOOST_REQUIRE( !state_1 ); - - ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( !ptr ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( clone_node ) -{ try { - BOOST_TEST_MESSAGE( "Check clone of un-finalized node" ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - std::string b_key = "bob"; - std::string b_val = "bob"; - std::string c_key = "charlie"; - std::string c_val = "charlie"; - std::string d_key = "dave"; - std::string d_val = "dave"; - - auto shared_db_lock = db.get_shared_lock(); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - - crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); - auto state_1a = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_1a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1a ); - state_1a->put_object( space, a_key, &a_val ); - state_1a->put_object( space, b_key, &b_val ); - db.finalize_node( state_1a_id, shared_db_lock ); - - crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); - auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2a ); - state_2a->put_object( space, c_key, &c_val ); - state_2a->remove_object( space, a_key ); - - crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); - auto state_2b = db.clone_node( state_2a_id, state_2b_id, 
protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2b ); - BOOST_CHECK( !state_2b->is_finalized() ); - BOOST_CHECK( !state_2b->get_object( space, a_key ) ); - BOOST_REQUIRE( state_2b->get_object( space, b_key ) ); - BOOST_CHECK_EQUAL( *state_2b->get_object( space, b_key ), b_val ); - BOOST_REQUIRE( state_2b->get_object( space, c_key ) ); - BOOST_CHECK_EQUAL( *state_2b->get_object( space, c_key ), c_val ); - - state_2b->remove_object( space, b_key ); - state_2b->put_object( space, d_key, &d_val ); - - BOOST_REQUIRE( state_2a->get_object( space, b_key ) ); - BOOST_CHECK_EQUAL( *state_2a->get_object( space, b_key ), b_val ); - BOOST_CHECK( !state_2a->get_object( space, d_key ) ); - - BOOST_TEST_MESSAGE( "Checking clone of a finalized node" ); - - crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); - BOOST_REQUIRE_THROW( db.clone_node( state_1a_id, state_1b_id, protocol::block_header(), shared_db_lock ), illegal_argument ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_CASE( get_all_nodes ) -{ try { - BOOST_TEST_MESSAGE( "Create state nodes" ); - - auto shared_db_lock = db.get_shared_lock(); - auto root_id = db.get_root( shared_db_lock )->id(); - - crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); - auto state_1a = db.create_writable_node( root_id, state_1a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1a ); - db.finalize_node( state_1a_id, shared_db_lock ); - - crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); - auto state_1b = db.create_writable_node( root_id, state_1b_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1b ); - - crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); - auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2a ); - - crypto::multihash state_2b_id = 
crypto::hash( crypto::multicodec::sha2_256, 0x2b ); - auto state_2b = db.create_writable_node( state_1a_id, state_2b_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2b ); - - BOOST_TEST_MESSAGE( "Check all state nodes" ); - - auto nodes = db.get_all_nodes( shared_db_lock ); - BOOST_REQUIRE_EQUAL( nodes.size(), 5 ); - BOOST_CHECK( nodes[0]->id() == root_id ); - BOOST_CHECK( nodes[1]->id() == state_1b_id ); - BOOST_CHECK( nodes[2]->id() == state_2a_id ); - BOOST_CHECK( nodes[3]->id() == state_1a_id ); - BOOST_CHECK( nodes[4]->id() == state_2b_id ); - - BOOST_TEST_MESSAGE( "Commit 1a" ); - - nodes.clear(); - state_1a.reset(); - state_1b.reset(); - state_2a.reset(); - state_2b.reset(); - shared_db_lock.reset(); - - auto unique_db_lock = db.get_unique_lock(); - db.commit_node( state_1a_id, unique_db_lock ); - - BOOST_TEST_MESSAGE( "Check all state nodes" ); - - nodes = db.get_all_nodes( unique_db_lock ); - BOOST_REQUIRE_EQUAL( nodes.size(), 3 ); - BOOST_CHECK( nodes[0]->id() == state_2a_id ); - BOOST_CHECK( nodes[1]->id() == state_1a_id ); - BOOST_CHECK( nodes[2]->id() == state_2b_id ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } - -BOOST_AUTO_TEST_SUITE_END()