From 239239b576a339ee468677df4626f5b84e1fea06 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 4 Dec 2023 11:32:38 -0800 Subject: [PATCH 01/26] Refactor for cmake submodule --- .gitmodules | 3 + CMakeLists.txt | 220 +++---- cmake | 1 + cmake/CodeCoverage.cmake | 444 --------------- cmake/Hunter/config.cmake | 128 ----- cmake/Hunter/passwords.cmake | 8 - cmake/HunterGate.cmake | 539 ------------------ cmake/config.cmake.in | 16 - cmake/gcov_for_clang.sh | 8 - cmake/koinos_state_dbConfigVersion.cmake.in | 67 --- cmake/pkg-config.pc.in | 4 - libraries/state_db/CMakeLists.txt | 86 +-- .../koinos/state_db/backends/types.hpp | 1 + tests/BoostTestTargetConfig.h | 7 - tests/BoostTestTargets.cmake | 242 -------- tests/BoostTestTargetsDynamic.h | 8 - tests/BoostTestTargetsIncluded.h | 7 - tests/BoostTestTargetsStatic.h | 7 - tests/CMakeLists.txt | 40 +- tests/CopyResourcesToBuildTree.cmake | 83 --- tests/GetForceIncludeDefinitions.cmake | 44 -- tests/tests/main.cpp | 2 +- 22 files changed, 122 insertions(+), 1843 deletions(-) create mode 100644 .gitmodules create mode 160000 cmake delete mode 100644 cmake/CodeCoverage.cmake delete mode 100644 cmake/Hunter/config.cmake delete mode 100644 cmake/Hunter/passwords.cmake delete mode 100644 cmake/HunterGate.cmake delete mode 100644 cmake/config.cmake.in delete mode 100755 cmake/gcov_for_clang.sh delete mode 100644 cmake/koinos_state_dbConfigVersion.cmake.in delete mode 100644 cmake/pkg-config.pc.in delete mode 100644 tests/BoostTestTargetConfig.h delete mode 100644 tests/BoostTestTargets.cmake delete mode 100644 tests/BoostTestTargetsDynamic.h delete mode 100644 tests/BoostTestTargetsIncluded.h delete mode 100644 tests/BoostTestTargetsStatic.h delete mode 100644 tests/CopyResourcesToBuildTree.cmake delete mode 100644 tests/GetForceIncludeDefinitions.cmake diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..e763590 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "cmake"] + path = cmake + url = https://github.com/koinos/koinos-cmake.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 9649297..ca4f71c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,169 +1,87 @@ -cmake_minimum_required(VERSION 3.10.2) - -find_program(CCACHE_PROGRAM ccache) -if(CCACHE_PROGRAM) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_PROGRAM}") - set(CMAKE_XCODE_ATTRIBUTE_CC "${CMAKE_SOURCE_DIR}/ci/ccache_clang") - set(CMAKE_XCODE_ATTRIBUTE_CXX "${CMAKE_SOURCE_DIR}/ci/ccache_clang++") - set(CMAKE_XCODE_ATTRIBUTE_LD "${CMAKE_SOURCE_DIR}/ci/ccache_clang") - set(CMAKE_XCODE_ATTRIBUTE_LDPLUSPLUS "${CMAKE_SOURCE_DIR}/ci/ccache_clang++") -endif() - -option(HUNTER_RUN_UPLOAD "Upload Hunter packages to binary cache server" OFF) - -set( - HUNTER_CACHE_SERVERS - "https://github.com/koinos/hunter-cache" - CACHE - STRING - "Default cache server" -) +cmake_minimum_required(VERSION 3.19.0) -set( - HUNTER_PASSWORDS_PATH - "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/passwords.cmake" - CACHE - FILEPATH - "Hunter passwords" -) +cmake_policy(SET CMP0074 NEW) +cmake_policy(SET CMP0135 NEW) +cmake_policy(SET CMP0114 NEW) +cmake_policy(SET CMP0144 NEW) -include("cmake/HunterGate.cmake") +include(cmake/Koinos.cmake) -HunterGate( - URL "https://github.com/cpp-pm/hunter/archive/v0.24.14.tar.gz" - SHA1 "00901c19eefc02d24b16705b5f5a2b4f093a73fb" - LOCAL -) +project(koinos_state_db + VERSION 1.1.0 + DESCRIPTION "The Koinos statedb library" + LANGUAGES CXX C) -project(koinos_state_db VERSION 1.1.0 LANGUAGES CXX C) - -# -# CONFIGURATION -# 
-include(GNUInstallDirs) - -set(KOINOS_LIB_TARGET_NAME ${PROJECT_NAME}) -set(KOINOS_LIB_TARGET_SHORT_NAME "state_db") -set(KOINOS_LIB_CONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" CACHE INTERNAL "") -set(KOINOS_LIB_INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}") -set(KOINOS_LIB_TARGETS_EXPORT_NAME "${PROJECT_NAME}Targets") -set(KOINOS_LIB_CMAKE_CONFIG_TEMPLATE "cmake/config.cmake.in") -set(KOINOS_LIB_CMAKE_CONFIG_DIR "${CMAKE_CURRENT_BINARY_DIR}") -set(KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}ConfigVersion.cmake") -set(KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Config.cmake") -set(KOINOS_LIB_CMAKE_PROJECT_TARGETS_FILE "${KOINOS_LIB_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Targets.cmake") -set(KOINOS_LIB_PKGCONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/pkgconfig") - -if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0") - cmake_policy(SET CMP0074 NEW) -endif () - -option(BUILD_TESTS "Build Tests" ON) -option(FORCE_COLORED_OUTPUT "Always produce ANSI-colored output (GNU/Clang only)." OFF) - -# This is to force color output when using ccache with Unix Makefiles -if(${FORCE_COLORED_OUTPUT}) - if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" ) - add_compile_options (-fdiagnostics-color=always) - elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" ) - add_compile_options (-fcolor-diagnostics) - endif () -endif () - -list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_VISIBILITY_PRESET hidden) -set(Boost_NO_BOOST_CMAKE ON) - -if(COVERAGE) - include(CodeCoverage) - append_coverage_compiler_flags() - setup_target_for_coverage_lcov( - NAME coverage - LCOV_ARGS "--quiet" "--no-external" - EXECUTABLE koinos_state_db_tests - EXCLUDE "libraries/vendor/*" "build/generated/*") -endif() - -hunter_add_package(Boost COMPONENTS test exception log) -hunter_add_package(ethash) -hunter_add_package(libsecp256k1-vrf) -hunter_add_package(nlohmann_json) -hunter_add_package(OpenSSL) -hunter_add_package(rocksdb) -hunter_add_package(yaml-cpp) -hunter_add_package(Protobuf) -hunter_add_package(gRPC) -hunter_add_package(abseil) -hunter_add_package(re2) -hunter_add_package(c-ares) -hunter_add_package(ZLIB) - -hunter_add_package(koinos_util) -hunter_add_package(koinos_log) -hunter_add_package(koinos_exception) -hunter_add_package(koinos_proto) -hunter_add_package(koinos_crypto) - -find_package(Boost CONFIG REQUIRED COMPONENTS program_options log log_setup exception) -find_package(RocksDB CONFIG REQUIRED) -find_package(Protobuf CONFIG REQUIRED) -find_package(ethash CONFIG REQUIRED) -find_package(libsecp256k1-vrf CONFIG REQUIRED) -find_package(nlohmann_json CONFIG REQUIRED) -find_package(OpenSSL REQUIRED) -find_package(yaml-cpp CONFIG REQUIRED) -find_package(gRPC CONFIG REQUIRED) -find_package(absl CONFIG REQUIRED) -find_package(re2 CONFIG REQUIRED) -find_package(c-ares CONFIG REQUIRED) -find_package(ZLIB CONFIG REQUIRED) - -find_package(koinos_util CONFIG REQUIRED) -find_package(koinos_log CONFIG REQUIRED) -find_package(koinos_exception CONFIG REQUIRED) -find_package(koinos_proto CONFIG REQUIRED) -find_package(koinos_crypto CONFIG REQUIRED) +koinos_define_version() +koinos_coverage( + EXECUTABLE + koinos_statedb_tests + EXCLUDE + "tests/*" +) -add_subdirectory(libraries) -if (BUILD_TESTS) - add_subdirectory(tests) -endif() - -# Install a pkg-config file, so other 
tools can find this. -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkg-config.pc.in" - "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc" +koinos_add_package(Boost CONFIG REQUIRED + ADD_COMPONENTS log exception test + FIND_COMPONENTS log log_setup ) -# -# INSTALL -# install header files, generate and install cmake config files for find_package() -# +koinos_add_package(rocksdb NAME RocksDB CONFIG REQUIRED) +koinos_add_package(Protobuf CONFIG REQUIRED) +koinos_add_package(ethash CONFIG REQUIRED) +koinos_add_package(libsecp256k1-vrf CONFIG REQUIRED) +koinos_add_package(nlohmann_json CONFIG REQUIRED) +koinos_add_package(OpenSSL REQUIRED) +koinos_add_package(yaml-cpp) +koinos_add_package(gRPC CONFIG REQUIRED) + +koinos_add_package(koinos_proto CONFIG REQUIRED) +koinos_add_package(koinos_exception CONFIG REQUIRED) +koinos_add_package(koinos_log CONFIG REQUIRED) +koinos_add_package(koinos_crypto CONFIG REQUIRED) +koinos_add_package(koinos_util CONFIG REQUIRED) + +include(GNUInstallDirs) include(CMakePackageConfigHelpers) -configure_file( - "cmake/${PROJECT_NAME}ConfigVersion.cmake.in" - ${KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE} - @ONLY -) -configure_file( - ${KOINOS_LIB_CMAKE_CONFIG_TEMPLATE} - ${KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE} - @ONLY +add_subdirectory(libraries) +add_subdirectory(tests) + +export( + TARGETS state_db + NAMESPACE Koinos:: + FILE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-targets.cmake ) install( - FILES ${KOINOS_LIB_CMAKE_PROJECT_CONFIG_FILE} ${KOINOS_LIB_CMAKE_VERSION_CONFIG_FILE} - DESTINATION ${KOINOS_LIB_CONFIG_INSTALL_DIR} + TARGETS state_db + EXPORT ${PROJECT_NAME}-targets + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} ) install( - FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc" - DESTINATION ${KOINOS_LIB_PKGCONFIG_INSTALL_DIR} + EXPORT ${PROJECT_NAME}-targets + NAMESPACE Koinos:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} ) + +configure_package_config_file( + cmake/Templates/project.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) + +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion) + +install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake + DESTINATION + ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) diff --git a/cmake b/cmake new file mode 160000 index 0000000..f832097 --- /dev/null +++ b/cmake @@ -0,0 +1 @@ +Subproject commit f832097251b21eb278a626bff6146ca9df594406 diff --git a/cmake/CodeCoverage.cmake b/cmake/CodeCoverage.cmake deleted file mode 100644 index 4627c42..0000000 --- a/cmake/CodeCoverage.cmake +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright (c) 2012 - 2017, Lars Bilke -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. 
Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# CHANGES: -# -# 2012-01-31, Lars Bilke -# - Enable Code Coverage -# -# 2013-09-17, Joakim Söderberg -# - Added support for Clang. -# - Some additional usage instructions. -# -# 2016-02-03, Lars Bilke -# - Refactored functions to use named parameters -# -# 2017-06-02, Lars Bilke -# - Merged with modified version from github.com/ufz/ogs -# -# 2019-05-06, Anatolii Kurotych -# - Remove unnecessary --coverage flag -# -# 2019-12-13, FeRD (Frank Dana) -# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor -# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments. -# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY -# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list -# - Set lcov basedir with -b argument -# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be -# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().) -# - Delete output dir, .info file on 'make clean' -# - Remove Python detection, since version mismatches will break gcovr -# - Minor cleanup (lowercase function names, update examples...) -# -# 2019-12-19, FeRD (Frank Dana) -# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets -# -# 2020-01-19, Bob Apthorpe -# - Added gfortran support -# -# 2020-02-17, FeRD (Frank Dana) -# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters -# in EXCLUDEs, and remove manual escaping from gcovr targets -# -# USAGE: -# -# 1. Copy this file into your cmake modules path. -# -# 2. Add the following line to your CMakeLists.txt (best inside an if-condition -# using a CMake option() to enable it just optionally): -# include(CodeCoverage) -# -# 3. Append necessary compiler flags: -# append_coverage_compiler_flags() -# -# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og -# -# 4. If you need to exclude additional directories from the report, specify them -# using full paths in the COVERAGE_EXCLUDES variable before calling -# setup_target_for_coverage_*(). -# Example: -# set(COVERAGE_EXCLUDES -# '${PROJECT_SOURCE_DIR}/src/dir1/*' -# '/path/to/my/src/dir2/*') -# Or, use the EXCLUDE argument to setup_target_for_coverage_*(). 
-# Example: -# setup_target_for_coverage_lcov( -# NAME coverage -# EXECUTABLE testrunner -# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*") -# -# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set -# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR) -# Example: -# set(COVERAGE_EXCLUDES "dir1/*") -# setup_target_for_coverage_gcovr_html( -# NAME coverage -# EXECUTABLE testrunner -# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src" -# EXCLUDE "dir2/*") -# -# 5. Use the functions described below to create a custom make target which -# runs your test executable and produces a code coverage report. -# -# 6. Build a Debug build: -# cmake -DCMAKE_BUILD_TYPE=Debug .. -# make -# make my_coverage_target -# - -include(CMakeParseArguments) - -# Check prereqs -find_program( GCOV_PATH gcov ) -find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl) -find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat ) -find_program( LLVM_COV_PATH llvm-cov ) -find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test) -find_program( CPPFILT_PATH NAMES c++filt ) - -if(NOT GCOV_PATH) - message(FATAL_ERROR "gcov not found! Aborting...") -endif() # NOT GCOV_PATH - -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") - if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3) - message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...") - endif() -elseif(NOT CMAKE_COMPILER_IS_GNUCXX) - if("${CMAKE_Fortran_COMPILER_ID}" MATCHES "[Ff]lang") - # Do nothing; exit conditional without error if true - elseif("${CMAKE_Fortran_COMPILER_ID}" MATCHES "GNU") - # Do nothing; exit conditional without error if true - else() - message(FATAL_ERROR "Compiler is not GNU gcc! Aborting...") - endif() -endif() - -set(COVERAGE_COMPILER_FLAGS "-g -fprofile-arcs -ftest-coverage" - CACHE INTERNAL "") - -set(CMAKE_Fortran_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the Fortran compiler during coverage builds." - FORCE ) -set(CMAKE_CXX_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C++ compiler during coverage builds." - FORCE ) -set(CMAKE_C_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C compiler during coverage builds." - FORCE ) -set(CMAKE_EXE_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE ) -set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." - FORCE ) -mark_as_advanced( - CMAKE_Fortran_FLAGS_COVERAGE - CMAKE_CXX_FLAGS_COVERAGE - CMAKE_C_FLAGS_COVERAGE - CMAKE_EXE_LINKER_FLAGS_COVERAGE - CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) - -if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") -endif() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" - -if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") - link_libraries(gcov) -endif() - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. 
-# -# setup_target_for_coverage_lcov( -# NAME testrunner_coverage # New target name -# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES testrunner # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# NO_DEMANGLE # Don't demangle C++ symbols -# # even if c++filt is found -# ) -function(setup_target_for_coverage_lcov) - - set(options NO_DEMANGLE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES LCOV_ARGS GENHTML_ARGS) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT LCOV_PATH) - message(FATAL_ERROR "lcov not found! Aborting...") - endif() # NOT LCOV_PATH - - # Needed for gcov_for_clang.sh - if(NOT LLVM_COV_PATH) - message(FATAL_ERROR "llvm-cov not found! Aborting...") - endif() # NOT LLVM_COV_PATH - - if(NOT GENHTML_PATH) - message(FATAL_ERROR "genhtml not found! Aborting...") - endif() # NOT GENHTML_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(LCOV_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_LCOV_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND LCOV_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES LCOV_EXCLUDES) - - # Conditional arguments - if(CPPFILT_PATH AND NOT ${Coverage_NO_DEMANGLE}) - set(GENHTML_EXTRA_ARGS "--demangle-cpp") - endif() - - set(GCOV_PATH ${CMAKE_SOURCE_DIR}/cmake/gcov_for_clang.sh) - - # Setup target - add_custom_target(${Coverage_NAME} - - # Cleanup lcov - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -directory . -b ${BASEDIR} --zerocounters - # Create baseline to make sure untouched files show up in the report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -c -i -d . -b ${BASEDIR} -o ${Coverage_NAME}.base - - # Run tests - COMMAND ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Capturing lcov counters and generating report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --directory . 
-b ${BASEDIR} --capture --output-file ${Coverage_NAME}.capture - # add baseline counters - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} -a ${Coverage_NAME}.base -a ${Coverage_NAME}.capture --output-file ${Coverage_NAME}.total - # filter collected data to final coverage report - COMMAND ${LCOV_PATH} ${Coverage_LCOV_ARGS} --gcov-tool ${GCOV_PATH} --remove ${Coverage_NAME}.total ${LCOV_EXCLUDES} --output-file ${Coverage_NAME}.info - - # Generate HTML output - COMMAND ${GENHTML_PATH} ${GENHTML_EXTRA_ARGS} ${Coverage_GENHTML_ARGS} -o ${Coverage_NAME} ${Coverage_NAME}.info - - # Set output files as GENERATED (will be removed on 'make clean') - BYPRODUCTS - ${Coverage_NAME}.base - ${Coverage_NAME}.capture - ${Coverage_NAME}.total - ${Coverage_NAME}.info - ${Coverage_NAME} # report directory - - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." - ) - - # Show where to find the lcov info report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." - ) - -endfunction() # setup_target_for_coverage_lcov - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# setup_target_for_coverage_gcovr_xml( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# ) -function(setup_target_for_coverage_gcovr_xml) - - set(options NONE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! 
Aborting...") - endif() # NOT GCOVR_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES GCOVR_EXCLUDES) - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDE_ARGS "") - foreach(EXCLUDE ${GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDE_ARGS "-e") - list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Running gcovr - COMMAND ${GCOVR_PATH} --xml - -r ${BASEDIR} ${GCOVR_EXCLUDE_ARGS} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}.xml - BYPRODUCTS ${Coverage_NAME}.xml - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Running gcovr to produce Cobertura code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml." - ) -endfunction() # setup_target_for_coverage_gcovr_xml - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# setup_target_for_coverage_gcovr_html( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# BASE_DIRECTORY "../" # Base directory for report -# # (defaults to PROJECT_SOURCE_DIR) -# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative -# # to BASE_DIRECTORY, with CMake 3.4+) -# ) -function(setup_target_for_coverage_gcovr_html) - - set(options NONE) - set(oneValueArgs BASE_DIRECTORY NAME) - set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! 
Aborting...") - endif() # NOT GCOVR_PATH - - # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR - if(${Coverage_BASE_DIRECTORY}) - get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE) - else() - set(BASEDIR ${PROJECT_SOURCE_DIR}) - endif() - - # Collect excludes (CMake 3.4+: Also compute absolute paths) - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES}) - if(CMAKE_VERSION VERSION_GREATER 3.4) - get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR}) - endif() - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - list(REMOVE_DUPLICATES GCOVR_EXCLUDES) - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDE_ARGS "") - foreach(EXCLUDE ${GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDE_ARGS "-e") - list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} - - # Create folder - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/${Coverage_NAME} - - # Running gcovr - COMMAND ${GCOVR_PATH} --html --html-details - -r ${BASEDIR} ${GCOVR_EXCLUDE_ARGS} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}/index.html - - BYPRODUCTS ${PROJECT_BINARY_DIR}/${Coverage_NAME} # report directory - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - VERBATIM # Protect arguments to commands - COMMENT "Running gcovr to produce HTML code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." - ) - -endfunction() # setup_target_for_coverage_gcovr_html - -function(append_coverage_compiler_flags) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") -endfunction() # append_coverage_compiler_flags diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake deleted file mode 100644 index 00c3753..0000000 --- a/cmake/Hunter/config.cmake +++ /dev/null @@ -1,128 +0,0 @@ -hunter_config(Boost - VERSION ${HUNTER_Boost_VERSION} - CMAKE_ARGS - USE_CONFIG_FROM_BOOST=ON - Boost_USE_STATIC_LIBS=ON - Boost_NO_BOOST_CMAKE=ON -) - -hunter_config(Protobuf - URL "https://github.com/koinos/protobuf/archive/e1b1477875a8b022903b548eb144f2c7bf4d9561.tar.gz" - SHA1 "5796707a98eec15ffb3ad86ff50e8eec5fa65e68" - CMAKE_ARGS - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(rocksdb - URL "https://github.com/facebook/rocksdb/archive/v6.15.2.tar.gz" - SHA1 "daf7ef3946fd39c910acaaa57789af8515b39251" - CMAKE_ARGS - WITH_TESTS=OFF - WITH_TOOLS=OFF - WITH_JNI=OFF - WITH_BENCHMARK_TOOLS=OFF - WITH_CORE_TOOLS=OFF - WITH_GFLAGS=OFF - PORTABLE=ON - FAIL_ON_WARNINGS=OFF - ROCKSDB_BUILD_SHARED=OFF - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(yaml-cpp - VERSION "0.6.3" - CMAKE_ARGS - CMAKE_CXX_FLAGS=-fvisibility=hidden - CMAKE_C_FLAGS=-fvisibility=hidden -) - -hunter_config(gRPC - VERSION 1.31.0-p0 - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - 
-hunter_config(abseil - VERSION ${HUNTER_abseil_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(re2 - VERSION ${HUNTER_re2_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(c-ares - VERSION ${HUNTER_c-ares_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(ZLIB - VERSION ${HUNTER_ZLIB_VERSION} - CMAKE_ARGS - CMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(libsecp256k1 - URL "https://github.com/soramitsu/soramitsu-libsecp256k1/archive/c7630e1bac638c0f16ee66d4dce7b5c49eecbaa5.tar.gz" - SHA1 "0534fa8948f279b26fd102905215a56f0ad7fa18" -) - -hunter_config(libsecp256k1-vrf - URL "https://github.com/koinos/secp256k1-vrf/archive/db479e83be5685f652a9bafefaef77246fdf3bbe.tar.gz" - SHA1 "62df75e061c4afd6f0548f1e8267cc3da6abee15" -) - -hunter_config(ethash - URL "https://github.com/chfast/ethash/archive/refs/tags/v0.8.0.tar.gz" - SHA1 "41fd440f70b6a8dfc3fd29b20f471dcbd1345ad0" - CMAKE_ARGS - CMAKE_CXX_STANDARD=17 - CMAKE_CXX_STANDARD_REQUIRED=ON -) - -hunter_config(koinos_log - URL "https://github.com/koinos/koinos-log-cpp/archive/ca1fdcbb26ee2d9c2c45f8692747b3f7a5235025.tar.gz" - SHA1 "3eb809598fc1812e217d867e583abe69f4804e38" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_util - URL "https://github.com/koinos/koinos-util-cpp/archive/dd3e15f0b08a99082b736b901bb78c0af4ed1982.tar.gz" - SHA1 "e5b475c10885dc5426c16a3e1122267b4a1668e1" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_proto - URL "https://github.com/koinos/koinos-proto-cpp/archive/04d6a7f0cf8d2eeaddd105441c398eaff8a1a519.tar.gz" - SHA1 "6d168b017b2545b03b8cd3ea4b1590b471da78e7" -) - -hunter_config(koinos_exception - URL "https://github.com/koinos/koinos-exception-cpp/archive/5501569e8bec1c97ddc1257e25ec1149bc2b50e9.tar.gz" - SHA1 "5c6966904fa5d28b7ea86194ef2fb4ce68fbdb59" - CMAKE_ARGS - BUILD_TESTS=OFF -) - -hunter_config(koinos_crypto - URL "https://github.com/koinos/koinos-crypto-cpp/archive/2f91acfd683b824439b9844095cdc2e89f371037.tar.gz" - SHA1 "88a3d6f6a6d029aa287f85acb4a878dc844818b1" -) diff --git a/cmake/Hunter/passwords.cmake b/cmake/Hunter/passwords.cmake deleted file mode 100644 index 357202e..0000000 --- a/cmake/Hunter/passwords.cmake +++ /dev/null @@ -1,8 +0,0 @@ -# cmake/Hunter/passwords.cmake - -hunter_upload_password( - REPO_OWNER "koinos" - REPO "hunter-cache" - USERNAME "koinos-ci" - PASSWORD "$ENV{GITHUB_USER_PASSWORD}" -) diff --git a/cmake/HunterGate.cmake b/cmake/HunterGate.cmake deleted file mode 100644 index 6d9cc24..0000000 --- a/cmake/HunterGate.cmake +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) 2013-2019, Ruslan Baratov -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This is a gate file to Hunter package manager. -# Include this file using `include` command and add package you need, example: -# -# cmake_minimum_required(VERSION 3.2) -# -# include("cmake/HunterGate.cmake") -# HunterGate( -# URL "https://github.com/path/to/hunter/archive.tar.gz" -# SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" -# ) -# -# project(MyProject) -# -# hunter_add_package(Foo) -# hunter_add_package(Boo COMPONENTS Bar Baz) -# -# Projects: -# * https://github.com/hunter-packages/gate/ -# * https://github.com/ruslo/hunter - -option(HUNTER_ENABLED "Enable Hunter package manager support" ON) - -if(HUNTER_ENABLED) - if(CMAKE_VERSION VERSION_LESS "3.2") - message( - FATAL_ERROR - "At least CMake version 3.2 required for Hunter dependency management." - " Update CMake or set HUNTER_ENABLED to OFF." - ) - endif() -endif() - -include(CMakeParseArguments) # cmake_parse_arguments - -option(HUNTER_STATUS_PRINT "Print working status" ON) -option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) -option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) - -set(HUNTER_ERROR_PAGE "https://docs.hunter.sh/en/latest/reference/errors") - -function(hunter_gate_status_print) - if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) - foreach(print_message ${ARGV}) - message(STATUS "[hunter] ${print_message}") - endforeach() - endif() -endfunction() - -function(hunter_gate_status_debug) - if(HUNTER_STATUS_DEBUG) - foreach(print_message ${ARGV}) - string(TIMESTAMP timestamp) - message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") - endforeach() - endif() -endfunction() - -function(hunter_gate_error_page error_page) - message("------------------------------ ERROR ------------------------------") - message(" ${HUNTER_ERROR_PAGE}/${error_page}.html") - message("-------------------------------------------------------------------") - message("") - message(FATAL_ERROR "") -endfunction() - -function(hunter_gate_internal_error) - message("") - foreach(print_message ${ARGV}) - message("[hunter ** INTERNAL **] ${print_message}") - endforeach() - message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") - message("") - hunter_gate_error_page("error.internal") -endfunction() - -function(hunter_gate_fatal_error) - cmake_parse_arguments(hunter "" "ERROR_PAGE" "" "${ARGV}") - if("${hunter_ERROR_PAGE}" STREQUAL "") - hunter_gate_internal_error("Expected ERROR_PAGE") - endif() - message("") - foreach(x ${hunter_UNPARSED_ARGUMENTS}) - message("[hunter ** FATAL ERROR **] ${x}") - endforeach() - message("[hunter ** FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") - message("") - hunter_gate_error_page("${hunter_ERROR_PAGE}") -endfunction() - -function(hunter_gate_user_error) - 
hunter_gate_fatal_error(${ARGV} ERROR_PAGE "error.incorrect.input.data") -endfunction() - -function(hunter_gate_self root version sha1 result) - string(COMPARE EQUAL "${root}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("root is empty") - endif() - - string(COMPARE EQUAL "${version}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("version is empty") - endif() - - string(COMPARE EQUAL "${sha1}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("sha1 is empty") - endif() - - string(SUBSTRING "${sha1}" 0 7 archive_id) - - if(EXISTS "${root}/cmake/Hunter") - set(hunter_self "${root}") - else() - set( - hunter_self - "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" - ) - endif() - - set("${result}" "${hunter_self}" PARENT_SCOPE) -endfunction() - -# Set HUNTER_GATE_ROOT cmake variable to suitable value. -function(hunter_gate_detect_root) - # Check CMake variable - string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) - if(not_empty) - set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") - return() - endif() - - # Check environment variable - string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) - if(not_empty) - set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") - return() - endif() - - # Check HOME environment variable - string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) - if(result) - set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") - return() - endif() - - # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) - if(WIN32) - string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) - if(result) - set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug( - "HUNTER_ROOT set using SYSTEMDRIVE environment variable" - ) - return() - endif() - - string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) - if(result) - set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) - hunter_gate_status_debug( - "HUNTER_ROOT set using USERPROFILE environment variable" - ) - return() - endif() - endif() - - hunter_gate_fatal_error( - "Can't detect HUNTER_ROOT" - ERROR_PAGE "error.detect.hunter.root" - ) -endfunction() - -function(hunter_gate_download dir) - string( - COMPARE - NOTEQUAL - "$ENV{HUNTER_DISABLE_AUTOINSTALL}" - "" - disable_autoinstall - ) - if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) - hunter_gate_fatal_error( - "Hunter not found in '${dir}'" - "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" - "Settings:" - " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" - " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" - ERROR_PAGE "error.run.install" - ) - endif() - string(COMPARE EQUAL "${dir}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("Empty 'dir' argument") - endif() - - string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") - endif() - - string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) - if(is_bad) - hunter_gate_internal_error("HUNTER_GATE_URL empty") - endif() - - set(done_location "${dir}/DONE") - set(sha1_location "${dir}/SHA1") - - set(build_dir "${dir}/Build") - set(cmakelists "${dir}/CMakeLists.txt") - - hunter_gate_status_debug("Locking directory: ${dir}") - file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) - hunter_gate_status_debug("Lock done") - - if(EXISTS "${done_location}") - # while waiting for 
lock other instance can do all the job - hunter_gate_status_debug("File '${done_location}' found, skip install") - return() - endif() - - file(REMOVE_RECURSE "${build_dir}") - file(REMOVE_RECURSE "${cmakelists}") - - file(MAKE_DIRECTORY "${build_dir}") # check directory permissions - - # Disabling languages speeds up a little bit, reduces noise in the output - # and avoids path too long windows error - file( - WRITE - "${cmakelists}" - "cmake_minimum_required(VERSION 3.2)\n" - "project(HunterDownload LANGUAGES NONE)\n" - "include(ExternalProject)\n" - "ExternalProject_Add(\n" - " Hunter\n" - " URL\n" - " \"${HUNTER_GATE_URL}\"\n" - " URL_HASH\n" - " SHA1=${HUNTER_GATE_SHA1}\n" - " DOWNLOAD_DIR\n" - " \"${dir}\"\n" - " TLS_VERIFY\n" - " ${HUNTER_TLS_VERIFY}\n" - " SOURCE_DIR\n" - " \"${dir}/Unpacked\"\n" - " CONFIGURE_COMMAND\n" - " \"\"\n" - " BUILD_COMMAND\n" - " \"\"\n" - " INSTALL_COMMAND\n" - " \"\"\n" - ")\n" - ) - - if(HUNTER_STATUS_DEBUG) - set(logging_params "") - else() - set(logging_params OUTPUT_QUIET) - endif() - - hunter_gate_status_debug("Run generate") - - # Need to add toolchain file too. - # Otherwise on Visual Studio + MDD this will fail with error: - # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" - if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") - get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) - set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") - else() - # 'toolchain_arg' can't be empty - set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") - endif() - - string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) - if(no_make) - set(make_arg "") - else() - # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM - set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") - endif() - - execute_process( - COMMAND - "${CMAKE_COMMAND}" - "-H${dir}" - "-B${build_dir}" - "-G${CMAKE_GENERATOR}" - "${toolchain_arg}" - ${make_arg} - WORKING_DIRECTORY "${dir}" - RESULT_VARIABLE download_result - ${logging_params} - ) - - if(NOT download_result EQUAL 0) - hunter_gate_internal_error( - "Configure project failed." 
- "To reproduce the error run: ${CMAKE_COMMAND} -H${dir} -B${build_dir} -G${CMAKE_GENERATOR} ${toolchain_arg} ${make_arg}" - "In directory ${dir}" - ) - endif() - - hunter_gate_status_print( - "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" - " ${HUNTER_GATE_URL}" - " -> ${dir}" - ) - execute_process( - COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" - WORKING_DIRECTORY "${dir}" - RESULT_VARIABLE download_result - ${logging_params} - ) - - if(NOT download_result EQUAL 0) - hunter_gate_internal_error("Build project failed") - endif() - - file(REMOVE_RECURSE "${build_dir}") - file(REMOVE_RECURSE "${cmakelists}") - - file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") - file(WRITE "${done_location}" "DONE") - - hunter_gate_status_debug("Finished") -endfunction() - -# Must be a macro so master file 'cmake/Hunter' can -# apply all variables easily just by 'include' command -# (otherwise PARENT_SCOPE magic needed) -macro(HunterGate) - if(HUNTER_GATE_DONE) - # variable HUNTER_GATE_DONE set explicitly for external project - # (see `hunter_download`) - set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) - endif() - - # First HunterGate command will init Hunter, others will be ignored - get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) - - if(NOT HUNTER_ENABLED) - # Empty function to avoid error "unknown function" - function(hunter_add_package) - endfunction() - - set( - _hunter_gate_disabled_mode_dir - "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" - ) - if(EXISTS "${_hunter_gate_disabled_mode_dir}") - hunter_gate_status_debug( - "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" - ) - list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") - endif() - elseif(_hunter_gate_done) - hunter_gate_status_debug("Secondary HunterGate (use old settings)") - hunter_gate_self( - "${HUNTER_CACHED_ROOT}" - "${HUNTER_VERSION}" - "${HUNTER_SHA1}" - _hunter_self - ) - include("${_hunter_self}/cmake/Hunter") - else() - set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}") - - string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" _have_project_name) - if(_have_project_name) - hunter_gate_fatal_error( - "Please set HunterGate *before* 'project' command. 
" - "Detected project: ${PROJECT_NAME}" - ERROR_PAGE "error.huntergate.before.project" - ) - endif() - - cmake_parse_arguments( - HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} - ) - - string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) - string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) - string( - COMPARE - NOTEQUAL - "${HUNTER_GATE_UNPARSED_ARGUMENTS}" - "" - _have_unparsed - ) - string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) - string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) - - if(_have_unparsed) - hunter_gate_user_error( - "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" - ) - endif() - if(_empty_sha1) - hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") - endif() - if(_empty_url) - hunter_gate_user_error("URL suboption of HunterGate is mandatory") - endif() - if(_have_global) - if(HUNTER_GATE_LOCAL) - hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") - endif() - if(_have_filepath) - hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") - endif() - endif() - if(HUNTER_GATE_LOCAL) - if(_have_global) - hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") - endif() - if(_have_filepath) - hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") - endif() - endif() - if(_have_filepath) - if(_have_global) - hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") - endif() - if(HUNTER_GATE_LOCAL) - hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") - endif() - endif() - - hunter_gate_detect_root() # set HUNTER_GATE_ROOT - - # Beautify path, fix probable problems with windows path slashes - get_filename_component( - HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE - ) - hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") - if(NOT HUNTER_ALLOW_SPACES_IN_PATH) - string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) - if(NOT _contain_spaces EQUAL -1) - hunter_gate_fatal_error( - "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." - "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" - "(Use at your own risk!)" - ERROR_PAGE "error.spaces.in.hunter.root" - ) - endif() - endif() - - string( - REGEX - MATCH - "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" - HUNTER_GATE_VERSION - "${HUNTER_GATE_URL}" - ) - string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) - if(_is_empty) - set(HUNTER_GATE_VERSION "unknown") - endif() - - hunter_gate_self( - "${HUNTER_GATE_ROOT}" - "${HUNTER_GATE_VERSION}" - "${HUNTER_GATE_SHA1}" - _hunter_self - ) - - set(_master_location "${_hunter_self}/cmake/Hunter") - if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") - # Hunter downloaded manually (e.g. by 'git clone') - set(_unused "xxxxxxxxxx") - set(HUNTER_GATE_SHA1 "${_unused}") - set(HUNTER_GATE_VERSION "${_unused}") - else() - get_filename_component(_archive_id_location "${_hunter_self}/.." 
ABSOLUTE) - set(_done_location "${_archive_id_location}/DONE") - set(_sha1_location "${_archive_id_location}/SHA1") - - # Check Hunter already downloaded by HunterGate - if(NOT EXISTS "${_done_location}") - hunter_gate_download("${_archive_id_location}") - endif() - - if(NOT EXISTS "${_done_location}") - hunter_gate_internal_error("hunter_gate_download failed") - endif() - - if(NOT EXISTS "${_sha1_location}") - hunter_gate_internal_error("${_sha1_location} not found") - endif() - file(READ "${_sha1_location}" _sha1_value) - string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) - if(NOT _is_equal) - hunter_gate_internal_error( - "Short SHA1 collision:" - " ${_sha1_value} (from ${_sha1_location})" - " ${HUNTER_GATE_SHA1} (HunterGate)" - ) - endif() - if(NOT EXISTS "${_master_location}") - hunter_gate_user_error( - "Master file not found:" - " ${_master_location}" - "try to update Hunter/HunterGate" - ) - endif() - endif() - include("${_master_location}") - set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) - endif() -endmacro() diff --git a/cmake/config.cmake.in b/cmake/config.cmake.in deleted file mode 100644 index 7064424..0000000 --- a/cmake/config.cmake.in +++ /dev/null @@ -1,16 +0,0 @@ -include(FindPackageHandleStandardArgs) -set(${CMAKE_FIND_PACKAGE_NAME}_CONFIG ${CMAKE_CURRENT_LIST_FILE}) -find_package_handle_standard_args(@PROJECT_NAME@ CONFIG_MODE) - -if(NOT TARGET Koinos::@KOINOS_LIB_TARGET_NAME@) - include("${CMAKE_CURRENT_LIST_DIR}/@KOINOS_LIB_TARGETS_EXPORT_NAME@.cmake") - if((NOT TARGET @KOINOS_LIB_TARGET_NAME@) AND - (NOT @PROJECT_NAME@_FIND_VERSION OR - @PROJECT_NAME@_FIND_VERSION VERSION_LESS 3.2.0)) - add_library(@KOINOS_LIB_TARGET_NAME@ INTERFACE IMPORTED GLOBAL) - set_target_properties(@KOINOS_LIB_TARGET_NAME@ PROPERTIES - INTERFACE_LINK_LIBRARIES Koinos::@KOINOS_LIB_TARGET_NAME@ - ) - endif() - add_library(Koinos::@KOINOS_LIB_TARGET_SHORT_NAME@ ALIAS @KOINOS_LIB_TARGET_NAME@) -endif() diff --git a/cmake/gcov_for_clang.sh b/cmake/gcov_for_clang.sh deleted file mode 100755 index 2788ba9..0000000 --- a/cmake/gcov_for_clang.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -which llvm-cov-11 > /dev/null -if [ $? -eq 0 ]; then - exec llvm-cov-11 gcov "$@" -else - exec llvm-cov gcov "$@" -fi diff --git a/cmake/koinos_state_dbConfigVersion.cmake.in b/cmake/koinos_state_dbConfigVersion.cmake.in deleted file mode 100644 index dc04e54..0000000 --- a/cmake/koinos_state_dbConfigVersion.cmake.in +++ /dev/null @@ -1,67 +0,0 @@ -# This is a basic version file for the Config-mode of find_package(). -# It is used by write_basic_package_version_file() as input file for configure_file() -# to create a version-file which can be installed along a config.cmake file. -# -# The created file sets PACKAGE_VERSION_EXACT if the current version string and -# the requested version string are exactly the same and it sets -# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, -# but only if the requested major version is the same as the current one. -# The variable CVF_VERSION must be set before calling configure_file(). 
- - -set(PACKAGE_VERSION "@CVF_VERSION@") - -if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) - set(PACKAGE_VERSION_COMPATIBLE FALSE) -else() - - if("@CVF_VERSION@" MATCHES "^([0-9]+)\\.") - set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") - else() - set(CVF_VERSION_MAJOR "@CVF_VERSION@") - endif() - - if(PACKAGE_FIND_VERSION_RANGE) - # both endpoints of the range must have the expected major version - math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") - if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR - OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) - OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) - set(PACKAGE_VERSION_COMPATIBLE FALSE) - elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR - AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) - OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) - set(PACKAGE_VERSION_COMPATIBLE TRUE) - else() - set(PACKAGE_VERSION_COMPATIBLE FALSE) - endif() - else() - if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) - set(PACKAGE_VERSION_COMPATIBLE TRUE) - else() - set(PACKAGE_VERSION_COMPATIBLE FALSE) - endif() - - if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) - set(PACKAGE_VERSION_EXACT TRUE) - endif() - endif() -endif() - - -# if the installed project requested no architecture check, don't perform the check -if("@CVF_ARCH_INDEPENDENT@") - return() -endif() - -# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: -if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@CMAKE_SIZEOF_VOID_P@" STREQUAL "") - return() -endif() - -# check that the installed version has the same 32/64bit-ness as the one which is currently searching: -if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "@CMAKE_SIZEOF_VOID_P@") - math(EXPR installedBits "@CMAKE_SIZEOF_VOID_P@ * 8") - set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") - set(PACKAGE_VERSION_UNSUITABLE TRUE) -endif() diff --git a/cmake/pkg-config.pc.in b/cmake/pkg-config.pc.in deleted file mode 100644 index 56fc07d..0000000 --- a/cmake/pkg-config.pc.in +++ /dev/null @@ -1,4 +0,0 @@ -Name: ${PROJECT_NAME} -Description: Koinos State DB Library -Version: ${PROJECT_VERSION} -Cflags: -I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR} diff --git a/libraries/state_db/CMakeLists.txt b/libraries/state_db/CMakeLists.txt index 94d87f3..220d1e2 100644 --- a/libraries/state_db/CMakeLists.txt +++ b/libraries/state_db/CMakeLists.txt @@ -1,49 +1,53 @@ -file(GLOB HEADERS - "include/koinos/state_db/*.hpp" - "include/koinos/state_db/detail/*.hpp" - "include/koinos/state_db/backends/*.hpp" - "include/koinos/state_db/backends/map/*.hpp" - "include/koinos/state_db/backends/rocksdb/*.hpp") -add_library(koinos_state_db - state_db.cpp - detail/state_delta.cpp - detail/merge_iterator.cpp - backends/backend.cpp - backends/iterator.cpp - backends/map/map_backend.cpp - backends/map/map_iterator.cpp - backends/rocksdb/rocksdb_backend.cpp - backends/rocksdb/rocksdb_iterator.cpp - backends/rocksdb/object_cache.cpp - ${HEADERS} ) +set(HEADERS + include/koinos/state_db/state_db_types.hpp + include/koinos/state_db/state_db.hpp + include/koinos/state_db/backends/backend.hpp + include/koinos/state_db/backends/exceptions.hpp + include/koinos/state_db/backends/iterator.hpp + 
include/koinos/state_db/backends/types.hpp + include/koinos/state_db/backends/map/map_backend.hpp + include/koinos/state_db/backends/map/map_iterator.hpp + include/koinos/state_db/backends/rocksdb/exceptions.hpp + include/koinos/state_db/backends/rocksdb/object_cache.hpp + include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp + include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp + include/koinos/state_db/detail/merge_iterator.hpp + include/koinos/state_db/detail/state_delta.hpp) -target_link_libraries(koinos_state_db Koinos::exception Koinos::proto Koinos::crypto RocksDB::rocksdb) -target_include_directories(koinos_state_db PUBLIC - $ - $ -) +add_library(state_db + state_db.cpp + detail/state_delta.cpp + detail/merge_iterator.cpp + backends/backend.cpp + backends/iterator.cpp + backends/map/map_backend.cpp + backends/map/map_iterator.cpp + backends/rocksdb/rocksdb_backend.cpp + backends/rocksdb/rocksdb_iterator.cpp + backends/rocksdb/object_cache.cpp + ${HEADERS}) -add_library(Koinos::state_db ALIAS koinos_state_db) +target_link_libraries( + state_db + PUBLIC + Koinos::exception + Koinos::proto + Koinos::crypto + RocksDB::rocksdb) -install(FILES ${HEADERS} DESTINATION "include/koinos/state_db") +koinos_add_format_target(state_db) -export( - TARGETS ${KOINOS_LIB_TARGET_NAME} - NAMESPACE Koinos:: - FILE ${KOINOS_LIB_CMAKE_PROJECT_TARGETS_FILE} +target_include_directories( + state_db + PUBLIC + $ + $ ) -install( - TARGETS ${KOINOS_LIB_TARGET_NAME} - EXPORT ${KOINOS_LIB_TARGETS_EXPORT_NAME} - INCLUDES DESTINATION ${KOINOS_LIB_INCLUDE_INSTALL_DIR} - RUNTIME DESTINATION bin - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib -) +add_library(Koinos::state_db ALIAS state_db) install( - EXPORT ${KOINOS_LIB_TARGETS_EXPORT_NAME} - NAMESPACE Koinos:: - DESTINATION ${KOINOS_LIB_CONFIG_INSTALL_DIR} -) + FILES + ${HEADERS} + DESTINATION + "include/koinos/state_db") diff --git a/libraries/state_db/include/koinos/state_db/backends/types.hpp b/libraries/state_db/include/koinos/state_db/backends/types.hpp index e2c932b..216cda2 100644 --- a/libraries/state_db/include/koinos/state_db/backends/types.hpp +++ b/libraries/state_db/include/koinos/state_db/backends/types.hpp @@ -1,5 +1,6 @@ #pragma once +#include #include namespace koinos::state_db::backends::detail { diff --git a/tests/BoostTestTargetConfig.h b/tests/BoostTestTargetConfig.h deleted file mode 100644 index dd3cdda..0000000 --- a/tests/BoostTestTargetConfig.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF static library - -#include diff --git a/tests/BoostTestTargets.cmake b/tests/BoostTestTargets.cmake deleted file mode 100644 index 799c902..0000000 --- a/tests/BoostTestTargets.cmake +++ /dev/null @@ -1,242 +0,0 @@ -# - Add tests using boost::test -# -# Add this line to your test files in place of including a basic boost test header: -# #include -# -# If you cannot do that and must use the included form for a given test, -# include the line -# // OVERRIDE_BOOST_TEST_INCLUDED_WARNING -# in the same file with the boost test include. 
-# -# include(BoostTestTargets) -# add_boost_test( SOURCES [] -# [FAIL_REGULAR_EXPRESSION ] -# [LAUNCHER ] -# [LIBRARIES [...]] -# [RESOURCES [...]] -# [TESTS [...]]) -# -# If for some reason you need access to the executable target created, -# it can be found in ${${testdriver_name}_TARGET_NAME} as specified when -# you called add_boost_test -# -# Requires CMake 2.6 or newer (uses the 'function' command) -# -# Requires: -# GetForceIncludeDefinitions -# CopyResourcesToBuildTree -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__add_boost_test) - return() -endif() -set(__add_boost_test YES) - -set(BOOST_TEST_TARGET_PREFIX "boosttest") - -if(NOT Boost_FOUND) - find_package(Boost 1.34.0 QUIET) -endif() - -include(GetForceIncludeDefinitions.cmake) -include(CopyResourcesToBuildTree.cmake) - -if(Boost_FOUND) - set(_boosttesttargets_libs) - set(_boostConfig "BoostTestTargetsIncluded.h") - if(NOT Boost_UNIT_TEST_FRAMEWORK_LIBRARY) - find_package(Boost 1.34.0 QUIET COMPONENTS unit_test_framework) - endif() - if(Boost_UNIT_TEST_FRAMEWORK_LIBRARY) - set(_boosttesttargets_libs "${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}") - if(Boost_USE_STATIC_LIBS) - set(_boostConfig "BoostTestTargetsStatic.h") - else() - if(NOT APPLE) - set(_boostConfig "BoostTestTargetsDynamic.h") - endif() - endif() - endif() - get_filename_component(_moddir ${CMAKE_CURRENT_LIST_FILE} PATH) - configure_file("${_moddir}/${_boostConfig}" - "${CMAKE_CURRENT_BINARY_DIR}/BoostTestTargetConfig.h" - COPYONLY) - include_directories("${CMAKE_CURRENT_BINARY_DIR}") -endif() - -function(add_boost_test _name) - if(NOT BUILD_TESTING) - return() - endif() - - # parse arguments - set(_nowhere) - set(_curdest _nowhere) - set(_val_args - SOURCES - FAIL_REGULAR_EXPRESSION - LAUNCHER - LIBRARIES - RESOURCES - TESTS) - set(_bool_args - USE_COMPILED_LIBRARY) - foreach(_arg ${_val_args} ${_bool_args}) - set(${_arg}) - endforeach() - foreach(_element ${ARGN}) - list(FIND _val_args "${_element}" _val_arg_find) - list(FIND _bool_args "${_element}" _bool_arg_find) - if("${_val_arg_find}" GREATER "-1") - set(_curdest "${_element}") - elseif("${_bool_arg_find}" GREATER "-1") - set("${_element}" ON) - set(_curdest _nowhere) - else() - list(APPEND ${_curdest} "${_element}") - endif() - endforeach() - - if(_nowhere) - message(FATAL_ERROR "Syntax error in use of add_boost_test!") - endif() - - if(NOT SOURCES) - message(FATAL_ERROR - "Syntax error in use of add_boost_test: at least one source file required!") - endif() - - if(Boost_FOUND) - - include_directories(${Boost_INCLUDE_DIRS}) - - set(includeType) - foreach(src ${SOURCES}) - file(READ ${src} thefile) - if("${thefile}" MATCHES ".*BoostTestTargetConfig.h.*") - set(includeType CONFIGURED) - set(includeFileLoc ${src}) - break() - elseif("${thefile}" MATCHES ".*boost/test/included/unit_test.hpp.*") - set(includeType INCLUDED) - set(includeFileLoc ${src}) - set(_boosttesttargets_libs) # clear this out - linking would be a bad idea - if(NOT - "${thefile}" - MATCHES - ".*OVERRIDE_BOOST_TEST_INCLUDED_WARNING.*") - message("Please replace the include line in ${src} with this alternate include line instead:") - message(" \#include ") - message("Once you've saved your changes, re-run CMake. 
(See BoostTestTargets.cmake for more info)") - endif() - break() - endif() - endforeach() - - if(NOT _boostTestTargetsNagged${_name} STREQUAL "${includeType}") - if("${includeType}" STREQUAL "CONFIGURED") - message(STATUS - "Test '${_name}' uses the CMake-configurable form of the boost test framework - congrats! (Including File: ${includeFileLoc})") - elseif("${includeType}" STREQUAL "INCLUDED") - message("In test '${_name}': ${includeFileLoc} uses the 'included' form of the boost unit test framework.") - else() - message("In test '${_name}': Didn't detect the CMake-configurable boost test include.") - message("Please replace your existing boost test include in that test with the following:") - message(" \#include ") - message("Once you've saved your changes, re-run CMake. (See BoostTestTargets.cmake for more info)") - endif() - endif() - set(_boostTestTargetsNagged${_name} - "${includeType}" - CACHE - INTERNAL - "" - FORCE) - - - if(RESOURCES) - list(APPEND SOURCES ${RESOURCES}) - endif() - - # Generate a unique target name, using the relative binary dir - # and provided name. (transform all / into _ and remove all other - # non-alphabet characters) - file(RELATIVE_PATH - targetpath - "${CMAKE_BINARY_DIR}" - "${CMAKE_CURRENT_BINARY_DIR}") - string(REGEX REPLACE "[^A-Za-z/_]" "" targetpath "${targetpath}") - string(REPLACE "/" "_" targetpath "${targetpath}") - - set(_target_name ${_name}) - set(${_name}_TARGET_NAME "${_target_name}" PARENT_SCOPE) - - # Build the test. - add_executable(${_target_name} ${SOURCES}) - - list(APPEND LIBRARIES ${_boosttesttargets_libs}) - - if(LIBRARIES) - target_link_libraries(${_target_name} ${LIBRARIES}) - endif() - - if(RESOURCES) - set_property(TARGET ${_target_name} PROPERTY RESOURCE ${RESOURCES}) - copy_resources_to_build_tree(${_target_name}) - endif() - - if(NOT Boost_TEST_FLAGS) -# set(Boost_TEST_FLAGS --catch_system_error=yes --output_format=XML) - set(Boost_TEST_FLAGS --catch_system_error=yes) - endif() - - # TODO: Figure out why only recent boost handles individual test running properly - - if(LAUNCHER) - set(_test_command ${LAUNCHER} "\$") - else() - set(_test_command ${_target_name}) - endif() - - if(TESTS) - foreach(_test ${TESTS}) - add_test( - ${_name}-${_test} - ${_test_command} --run_test=${_test} ${Boost_TEST_FLAGS} - ) - if(FAIL_REGULAR_EXPRESSION) - set_tests_properties(${_name}-${_test} - PROPERTIES - FAIL_REGULAR_EXPRESSION - "${FAIL_REGULAR_EXPRESSION}") - endif() - endforeach() - else() - add_test( - ${_name}-boost_test - ${_test_command} ${Boost_TEST_FLAGS} - ) - if(FAIL_REGULAR_EXPRESSION) - set_tests_properties(${_name}-boost_test - PROPERTIES - FAIL_REGULAR_EXPRESSION - "${FAIL_REGULAR_EXPRESSION}") - endif() - endif() - - # CppCheck the test if we can. - if(COMMAND add_cppcheck) - add_cppcheck(${_target_name} STYLE UNUSED_FUNCTIONS) - endif() - - endif() -endfunction() diff --git a/tests/BoostTestTargetsDynamic.h b/tests/BoostTestTargetsDynamic.h deleted file mode 100644 index 4bff567..0000000 --- a/tests/BoostTestTargetsDynamic.h +++ /dev/null @@ -1,8 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. 
- -// Using the Boost UTF dynamic library - -#define BOOST_TEST_DYN_LINK -#include diff --git a/tests/BoostTestTargetsIncluded.h b/tests/BoostTestTargetsIncluded.h deleted file mode 100644 index 253133c..0000000 --- a/tests/BoostTestTargetsIncluded.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF included framework - -#include diff --git a/tests/BoostTestTargetsStatic.h b/tests/BoostTestTargetsStatic.h deleted file mode 100644 index dd3cdda..0000000 --- a/tests/BoostTestTargetsStatic.h +++ /dev/null @@ -1,7 +0,0 @@ -// Small header computed by CMake to set up boost test. -// include AFTER #define BOOST_TEST_MODULE whatever -// but before any other boost test includes. - -// Using the Boost UTF static library - -#include diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d4b29ed..edf6bdb 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -11,45 +11,9 @@ enable_testing() file(GLOB UNIT_TESTS "tests/*.cpp") file(GLOB_RECURSE TEST_FIXTURES "include/*.hpp") -include(BoostTestTargets.cmake) +koinos_parse_unit_tests(TEST_CASES ${UNIT_TESTS}) -function(parse_unit_tests RESULT) - set(SOURCES) - foreach(_element ${ARGN}) - list(APPEND SOURCES "${_element}") - endforeach() - - set(tests) - - foreach(src ${SOURCES}) - file(READ ${src} thefile) - string(REGEX MATCH "BOOST_FIXTURE_TEST_SUITE\\([A-Za-z0-9_,<> ]*\\)" test_suite "${thefile}" ) - - if( NOT (test_suite STREQUAL "") ) - string(SUBSTRING "${test_suite}" 25 -1 test_suite) - string(FIND "${test_suite}" "," comma_loc ) - string(SUBSTRING "${test_suite}" 0 ${comma_loc} test_suite) - string(STRIP "${test_suite}" test_suite) - - string( REGEX MATCHALL "BOOST_AUTO_TEST_CASE\\([A-Za-z0-9_,<> ]*\\)" cases "${thefile}" ) - - foreach( test_case ${cases} ) - string(SUBSTRING "${test_case}" 21 -1 test_case) - string(FIND "${test_case}" ")" paren_loc ) - string(SUBSTRING "${test_case}" 0 ${paren_loc} test_case) - string(STRIP "${test_case}" test_case) - - list(APPEND tests "${test_suite}/${test_case}") - endforeach() - endif() - endforeach() - - set(${RESULT} ${tests} PARENT_SCOPE) -endfunction() - -parse_unit_tests(TEST_CASES ${UNIT_TESTS}) - -add_boost_test(koinos_state_db_tests +koinos_add_test(koinos_state_db_tests SOURCES ${UNIT_TESTS} ${TEST_FIXTURES} TESTS ${TEST_CASES} ) diff --git a/tests/CopyResourcesToBuildTree.cmake b/tests/CopyResourcesToBuildTree.cmake deleted file mode 100644 index 3512cc4..0000000 --- a/tests/CopyResourcesToBuildTree.cmake +++ /dev/null @@ -1,83 +0,0 @@ -# - Copy the resources your app needs to the build tree. -# -# copy_resources_to_build_tree() -# -# Requires CMake 2.6 or newer (uses the 'function' command) -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__copy_resources_to_build_tree) - return() -endif() -set(__copy_resources_to_build_tree YES) - -function(copy_resources_to_build_tree _target) - get_target_property(_resources ${_target} RESOURCE) - if(NOT _resources) - # Bail if no resources - message(STATUS - "Told to copy resources for target ${_target}, but " - "no resources are set!") - return() - endif() - - get_target_property(_path ${_target} LOCATION) - get_filename_component(_path "${_path}" PATH) - - if(NOT MSVC AND NOT "${CMAKE_GENERATOR}" MATCHES "Makefiles") - foreach(_config ${CMAKE_CONFIGURATION_TYPES}) - get_target_property(_path${_config} ${_target} LOCATION_${_config}) - get_filename_component(_path${_config} "${_path${_config}}" PATH) - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E make_directory "${_path${_config}}/" - COMMENT "Creating directory ${_path${_config}}/") - endforeach() - endif() - - foreach(_res ${_resources}) - if(NOT IS_ABSOLUTE "${_res}") - get_filename_component(_res "${_res}" ABSOLUTE) - endif() - get_filename_component(_name "${_res}" NAME) - - if(MSVC) - # Working dir is solution file dir, not exe file dir. - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${CMAKE_BINARY_DIR}/" - COMMENT "Copying ${_name} to ${CMAKE_BINARY_DIR}/ for MSVC") - else() - if("${CMAKE_GENERATOR}" MATCHES "Makefiles") - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${_path}/" - COMMENT "Copying ${_name} to ${_path}/") - else() - foreach(_config ${CMAKE_CONFIGURATION_TYPES}) - add_custom_command(TARGET ${_target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} - ARGS -E copy "${_res}" "${_path${_config}}" - COMMENT "Copying ${_name} to ${_path${_config}}") - endforeach() - - endif() - endif() - endforeach() -endfunction() diff --git a/tests/GetForceIncludeDefinitions.cmake b/tests/GetForceIncludeDefinitions.cmake deleted file mode 100644 index efcca04..0000000 --- a/tests/GetForceIncludeDefinitions.cmake +++ /dev/null @@ -1,44 +0,0 @@ -# - Get the platform-appropriate flags to add to force inclusion of a file -# -# The most common use of this is to use a generated config.h-type file -# placed out of the source tree in all files. -# -# get_force_include_definitions(var forcedincludefiles...) - -# where var is the name of your desired output variable, and everything -# else is a source file to forcibly include. -# a list item to be filtered. -# -# Original Author: -# 2009-2010 Ryan Pavlik -# http://academic.cleardefinition.com -# Iowa State University HCI Graduate Program/VRAC -# -# Copyright Iowa State University 2009-2010. -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -if(__get_force_include_definitions) - return() -endif() -set(__get_force_include_definitions YES) - -function(get_force_include_definitions var) - set(_flagprefix) - if(CMAKE_COMPILER_IS_GNUCXX) - set(_flag "-include") - elseif(MSVC) - set(_flag "/FI") - else() - message(SEND_ERROR "You don't seem to be using MSVC or GCC, but") - message(SEND_ERROR "the project called get_force_include_definitions.") - message(SEND_ERROR "Contact this project with the name of your") - message(FATAL_ERROR "compiler and preferably the flag to force includes") - endif() - - set(_out) - foreach(_item ${ARGN}) - list(APPEND _out "${_flag} \"${_item}\"") - endforeach() - set(${var} "${_out}" PARENT_SCOPE) -endfunction() diff --git a/tests/tests/main.cpp b/tests/tests/main.cpp index b9ec7ab..339b70a 100644 --- a/tests/tests/main.cpp +++ b/tests/tests/main.cpp @@ -1,3 +1,3 @@ #define BOOST_TEST_MODULE koinos_state_db_tests -#include +#include #include From 631f415a8b0980162ed1ba1909e5210aedc72be6 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Thu, 7 Dec 2023 15:06:32 -0800 Subject: [PATCH 02/26] Refactor project to use Pitchfork specification --- .gitmodules | 3 - .travis.yml | 6 +- CMakeLists.txt | 62 +++++----------- LICENSE.md | 2 - README.md | 74 ++++++++++++++++++- cmake | 1 - .../koinos/state_db/backends/backend.hpp | 0 .../koinos/state_db/backends/exceptions.hpp | 0 .../koinos/state_db/backends/iterator.hpp | 0 .../state_db/backends/map/map_backend.hpp | 0 .../state_db/backends/map/map_iterator.hpp | 0 .../state_db/backends/rocksdb/exceptions.hpp | 0 .../backends/rocksdb/object_cache.hpp | 0 .../backends/rocksdb/rocksdb_backend.hpp | 0 .../backends/rocksdb/rocksdb_iterator.hpp | 0 .../koinos/state_db/backends/types.hpp | 0 .../koinos/state_db/state_db.hpp | 0 .../koinos/state_db/state_db_types.hpp | 0 libraries/CMakeLists.txt | 1 - libraries/state_db/CMakeLists.txt | 53 ------------- src/CMakeLists.txt | 53 +++++++++++++ .../state_db => src}/backends/backend.cpp | 0 .../state_db => src}/backends/iterator.cpp | 0 .../backends/map/map_backend.cpp | 0 .../backends/map/map_iterator.cpp | 0 .../backends/rocksdb/object_cache.cpp | 0 .../backends/rocksdb/rocksdb_backend.cpp | 0 .../backends/rocksdb/rocksdb_iterator.cpp | 0 .../detail => src}/merge_iterator.cpp | 2 +- .../detail => src}/merge_iterator.hpp | 2 +- {libraries/state_db => src}/state_db.cpp | 5 +- .../state_db/detail => src}/state_delta.cpp | 8 +- .../state_db/detail => src}/state_delta.hpp | 0 tests/CMakeLists.txt | 19 ++--- tests/{tests => }/main.cpp | 0 tests/{tests => }/state_db_test.cpp | 7 +- {ci => tools/ci}/after_success.sh | 0 {ci => tools/ci}/build.sh | 0 {ci => tools/ci}/ccache_clang | 0 {ci => tools/ci}/ccache_clang++ | 0 {ci => tools/ci}/install.sh | 0 {ci => tools/ci}/test.sh | 2 +- 42 files changed, 169 insertions(+), 131 deletions(-) delete mode 100644 .gitmodules delete mode 160000 cmake rename {libraries/state_db/include => include}/koinos/state_db/backends/backend.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/exceptions.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/iterator.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/map/map_backend.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/map/map_iterator.hpp (100%) rename {libraries/state_db/include => 
include}/koinos/state_db/backends/rocksdb/exceptions.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/rocksdb/object_cache.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/backends/types.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/state_db.hpp (100%) rename {libraries/state_db/include => include}/koinos/state_db/state_db_types.hpp (100%) delete mode 100644 libraries/CMakeLists.txt delete mode 100644 libraries/state_db/CMakeLists.txt create mode 100644 src/CMakeLists.txt rename {libraries/state_db => src}/backends/backend.cpp (100%) rename {libraries/state_db => src}/backends/iterator.cpp (100%) rename {libraries/state_db => src}/backends/map/map_backend.cpp (100%) rename {libraries/state_db => src}/backends/map/map_iterator.cpp (100%) rename {libraries/state_db => src}/backends/rocksdb/object_cache.cpp (100%) rename {libraries/state_db => src}/backends/rocksdb/rocksdb_backend.cpp (100%) rename {libraries/state_db => src}/backends/rocksdb/rocksdb_iterator.cpp (100%) rename {libraries/state_db/detail => src}/merge_iterator.cpp (99%) rename {libraries/state_db/include/koinos/state_db/detail => src}/merge_iterator.hpp (99%) rename {libraries/state_db => src}/state_db.cpp (99%) rename {libraries/state_db/detail => src}/state_delta.cpp (99%) rename {libraries/state_db/include/koinos/state_db/detail => src}/state_delta.hpp (100%) rename tests/{tests => }/main.cpp (100%) rename tests/{tests => }/state_db_test.cpp (99%) rename {ci => tools/ci}/after_success.sh (100%) rename {ci => tools/ci}/build.sh (100%) rename {ci => tools/ci}/ccache_clang (100%) rename {ci => tools/ci}/ccache_clang++ (100%) rename {ci => tools/ci}/install.sh (100%) rename {ci => tools/ci}/test.sh (86%) diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e763590..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "cmake"] - path = cmake - url = https://github.com/koinos/koinos-cmake.git diff --git a/.travis.yml b/.travis.yml index 4391c15..8f96854 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,13 +34,13 @@ before_install: - eval "${MATRIX_EVAL}" install: - - ci/install.sh + - tools/ci/install.sh script: - - ci/build.sh && ci/test.sh + - tools/ci/build.sh && tools/ci/test.sh after_success: - - ci/after_success.sh + - tools/ci/after_success.sh notifications: slack: diff --git a/CMakeLists.txt b/CMakeLists.txt index ca4f71c..3b48234 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,11 +2,25 @@ cmake_minimum_required(VERSION 3.19.0) cmake_policy(SET CMP0074 NEW) -cmake_policy(SET CMP0135 NEW) cmake_policy(SET CMP0114 NEW) -cmake_policy(SET CMP0144 NEW) -include(cmake/Koinos.cmake) +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.24.0) + cmake_policy(SET CMP0135 NEW) +endif() + +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.27.0) + cmake_policy(SET CMP0144 NEW) +endif() + +include(FetchContent) +FetchContent_Declare( + koinos_cmake + GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git + GIT_TAG f3d5dd68611a515837b2a038cc93365ad12e8c41 +) +FetchContent_MakeAvailable(koinos_cmake) + +include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake") project(koinos_state_db VERSION 1.1.0 @@ -17,7 +31,7 @@ koinos_define_version() koinos_coverage( EXECUTABLE - koinos_statedb_tests + koinos_statedb_tests EXCLUDE 
"tests/*" ) @@ -45,43 +59,7 @@ koinos_add_package(koinos_util CONFIG REQUIRED) include(GNUInstallDirs) include(CMakePackageConfigHelpers) -add_subdirectory(libraries) +add_subdirectory(src) add_subdirectory(tests) -export( - TARGETS state_db - NAMESPACE Koinos:: - FILE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-targets.cmake -) - -install( - TARGETS state_db - EXPORT ${PROJECT_NAME}-targets - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} -) - -install( - EXPORT ${PROJECT_NAME}-targets - NAMESPACE Koinos:: - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} -) - -configure_package_config_file( - cmake/Templates/project.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake - INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) - -write_basic_package_version_file( - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake - VERSION ${PROJECT_VERSION} - COMPATIBILITY SameMajorVersion) - -install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake - DESTINATION - ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}) +koinos_install(TARGETS state_db) diff --git a/LICENSE.md b/LICENSE.md index ba4f52c..945eead 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -4,6 +4,4 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -The Software is not used with any blockchain or blockchain fork that is not recognized by Koinos Group, LLC in writing prior to Dec 31, 2022. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index a9c4c82..28f005a 100644 --- a/README.md +++ b/README.md @@ -1 +1,73 @@ -# koinos-repo-template +# Koinos StateDB Cpp + +This library implements StateDB, a fork aware persistent database, for the Koinos Blockchain Framework. + +### Project Structure + +This project's structure follows the [Pitchfork](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/vector-of-bool/pitchfork/develop/data/spec.bs) specification. + +**`build`**: An ephemeral directory for building the project. Not checked in, but excluded via `.gitignore`. + +**`include`**: Contains all public headers for the Koinos StateDB. + +**`src`**: Contains all source code and private headers for Koinos StateDB. + +**`tests`**: Contains tests for Koinos StateDB. + +**`tools`**: Contains additional tooling for Koinos StateDB, primarily CI scripts. + +### Building + +Koinos StateDB's build process is configured using CMake. Additionally, all dependencies are managed through Hunter, a CMake drive package manager for C/C++. This means that all dependencies are downloaded and built during configuration rather than relying on system installed libraries. + +``` +mkdir build +cd build +cmake -D CMAKE_BUILD_TYPE=Release .. +cmake --build . 
--config Release --parallel
+```
+
+You can optionally run static analysis with Clang-Tidy during the build process. Static analysis is checked in CI and is required to pass before merging pull requests.
+
+```
+cmake -D STATIC_ANALYSIS=ON ..
+```
+
+### Testing
+
+Tests are built by default as target `koinos_state_db_tests`. You can build them specifically with:
+
+```
+cmake --build . --config Release --parallel --target koinos_state_db_tests
+```
+
+Tests can be invoked from the tests directory within the build directory.
+
+```
+cd tests
+./koinos_state_db_tests
+```
+
+Tests can also be run in parallel using CTest.
+
+```
+cd tests
+ctest -j
+```
+
+You can also generate a coverage report.
+
+```
+cmake -D CMAKE_BUILD_TYPE=Debug -D COVERAGE=ON ..
+cmake --build . --config Debug --parallel 3 --target coverage
+```
+
+### Formatting
+
+Formatting of the source code is enforced by ClangFormat. If ClangFormat is installed, build targets will be automatically generated. You can review the library's code style by uploading the included `.clang-format` to https://clang-format-configurator.site/.
+
+You can build `format.check` to check formatting and `format.fix` to attempt to automatically fix formatting. It is recommended to check and manually fix formatting as automatic formatting can unintentionally change code.
+
+### Contributing
+
+As an open source project, contributions are welcome and appreciated. Before contributing, please read our [Contribution Guidelines](CONTRIBUTING.md).
diff --git a/cmake b/cmake
deleted file mode 160000
index f832097..0000000
--- a/cmake
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f832097251b21eb278a626bff6146ca9df594406
diff --git a/libraries/state_db/include/koinos/state_db/backends/backend.hpp b/include/koinos/state_db/backends/backend.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/backend.hpp
rename to include/koinos/state_db/backends/backend.hpp
diff --git a/libraries/state_db/include/koinos/state_db/backends/exceptions.hpp b/include/koinos/state_db/backends/exceptions.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/exceptions.hpp
rename to include/koinos/state_db/backends/exceptions.hpp
diff --git a/libraries/state_db/include/koinos/state_db/backends/iterator.hpp b/include/koinos/state_db/backends/iterator.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/iterator.hpp
rename to include/koinos/state_db/backends/iterator.hpp
diff --git a/libraries/state_db/include/koinos/state_db/backends/map/map_backend.hpp b/include/koinos/state_db/backends/map/map_backend.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/map/map_backend.hpp
rename to include/koinos/state_db/backends/map/map_backend.hpp
diff --git a/libraries/state_db/include/koinos/state_db/backends/map/map_iterator.hpp b/include/koinos/state_db/backends/map/map_iterator.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/map/map_iterator.hpp
rename to include/koinos/state_db/backends/map/map_iterator.hpp
diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/exceptions.hpp b/include/koinos/state_db/backends/rocksdb/exceptions.hpp
similarity index 100%
rename from libraries/state_db/include/koinos/state_db/backends/rocksdb/exceptions.hpp
rename to include/koinos/state_db/backends/rocksdb/exceptions.hpp
diff --git
a/libraries/state_db/include/koinos/state_db/backends/rocksdb/object_cache.hpp b/include/koinos/state_db/backends/rocksdb/object_cache.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/backends/rocksdb/object_cache.hpp rename to include/koinos/state_db/backends/rocksdb/object_cache.hpp diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp rename to include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp diff --git a/libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp rename to include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp diff --git a/libraries/state_db/include/koinos/state_db/backends/types.hpp b/include/koinos/state_db/backends/types.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/backends/types.hpp rename to include/koinos/state_db/backends/types.hpp diff --git a/libraries/state_db/include/koinos/state_db/state_db.hpp b/include/koinos/state_db/state_db.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/state_db.hpp rename to include/koinos/state_db/state_db.hpp diff --git a/libraries/state_db/include/koinos/state_db/state_db_types.hpp b/include/koinos/state_db/state_db_types.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/state_db_types.hpp rename to include/koinos/state_db/state_db_types.hpp diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt deleted file mode 100644 index 038e992..0000000 --- a/libraries/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(state_db) diff --git a/libraries/state_db/CMakeLists.txt b/libraries/state_db/CMakeLists.txt deleted file mode 100644 index 220d1e2..0000000 --- a/libraries/state_db/CMakeLists.txt +++ /dev/null @@ -1,53 +0,0 @@ -set(HEADERS - include/koinos/state_db/state_db_types.hpp - include/koinos/state_db/state_db.hpp - include/koinos/state_db/backends/backend.hpp - include/koinos/state_db/backends/exceptions.hpp - include/koinos/state_db/backends/iterator.hpp - include/koinos/state_db/backends/types.hpp - include/koinos/state_db/backends/map/map_backend.hpp - include/koinos/state_db/backends/map/map_iterator.hpp - include/koinos/state_db/backends/rocksdb/exceptions.hpp - include/koinos/state_db/backends/rocksdb/object_cache.hpp - include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp - include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp - include/koinos/state_db/detail/merge_iterator.hpp - include/koinos/state_db/detail/state_delta.hpp) - -add_library(state_db - state_db.cpp - detail/state_delta.cpp - detail/merge_iterator.cpp - backends/backend.cpp - backends/iterator.cpp - backends/map/map_backend.cpp - backends/map/map_iterator.cpp - backends/rocksdb/rocksdb_backend.cpp - backends/rocksdb/rocksdb_iterator.cpp - backends/rocksdb/object_cache.cpp - ${HEADERS}) - -target_link_libraries( - state_db - PUBLIC - Koinos::exception - Koinos::proto - Koinos::crypto - RocksDB::rocksdb) - -koinos_add_format_target(state_db) - -target_include_directories( - state_db - PUBLIC - $ - $ -) - -add_library(Koinos::state_db ALIAS state_db) - -install( - FILES - 
${HEADERS} - DESTINATION - "include/koinos/state_db") diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..0355756 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,53 @@ +add_library(state_db + state_db.cpp + state_delta.cpp + merge_iterator.cpp + backends/backend.cpp + backends/iterator.cpp + backends/map/map_backend.cpp + backends/map/map_iterator.cpp + backends/rocksdb/rocksdb_backend.cpp + backends/rocksdb/rocksdb_iterator.cpp + backends/rocksdb/object_cache.cpp + + merge_iterator.hpp + state_delta.hpp + + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db_types.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/exceptions.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/iterator.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/types.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/map/map_backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/map/map_iterator.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/exceptions.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/object_cache.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp + ${PROJECT_SOURCE_DIR}/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp) + +target_link_libraries( + state_db + PUBLIC + Koinos::exception + Koinos::proto + Koinos::crypto + RocksDB::rocksdb) + +koinos_add_format_target(state_db) + +target_include_directories( + state_db + PUBLIC + $ + $ +) + +add_library(Koinos::state_db ALIAS state_db) + +install( + DIRECTORY + ${PROJECT_SOURCE_DIR}/include + DESTINATION + include/ +) diff --git a/libraries/state_db/backends/backend.cpp b/src/backends/backend.cpp similarity index 100% rename from libraries/state_db/backends/backend.cpp rename to src/backends/backend.cpp diff --git a/libraries/state_db/backends/iterator.cpp b/src/backends/iterator.cpp similarity index 100% rename from libraries/state_db/backends/iterator.cpp rename to src/backends/iterator.cpp diff --git a/libraries/state_db/backends/map/map_backend.cpp b/src/backends/map/map_backend.cpp similarity index 100% rename from libraries/state_db/backends/map/map_backend.cpp rename to src/backends/map/map_backend.cpp diff --git a/libraries/state_db/backends/map/map_iterator.cpp b/src/backends/map/map_iterator.cpp similarity index 100% rename from libraries/state_db/backends/map/map_iterator.cpp rename to src/backends/map/map_iterator.cpp diff --git a/libraries/state_db/backends/rocksdb/object_cache.cpp b/src/backends/rocksdb/object_cache.cpp similarity index 100% rename from libraries/state_db/backends/rocksdb/object_cache.cpp rename to src/backends/rocksdb/object_cache.cpp diff --git a/libraries/state_db/backends/rocksdb/rocksdb_backend.cpp b/src/backends/rocksdb/rocksdb_backend.cpp similarity index 100% rename from libraries/state_db/backends/rocksdb/rocksdb_backend.cpp rename to src/backends/rocksdb/rocksdb_backend.cpp diff --git a/libraries/state_db/backends/rocksdb/rocksdb_iterator.cpp b/src/backends/rocksdb/rocksdb_iterator.cpp similarity index 100% rename from libraries/state_db/backends/rocksdb/rocksdb_iterator.cpp rename to src/backends/rocksdb/rocksdb_iterator.cpp diff --git a/libraries/state_db/detail/merge_iterator.cpp b/src/merge_iterator.cpp similarity index 99% rename from libraries/state_db/detail/merge_iterator.cpp rename to 
src/merge_iterator.cpp index d538bb2..4eb37ac 100644 --- a/libraries/state_db/detail/merge_iterator.cpp +++ b/src/merge_iterator.cpp @@ -1,4 +1,4 @@ -#include +#include "merge_iterator.hpp" namespace koinos::state_db::detail { diff --git a/libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp b/src/merge_iterator.hpp similarity index 99% rename from libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp rename to src/merge_iterator.hpp index efeea81..d873afc 100644 --- a/libraries/state_db/include/koinos/state_db/detail/merge_iterator.hpp +++ b/src/merge_iterator.hpp @@ -1,6 +1,6 @@ #pragma once -#include +#include "state_delta.hpp" #include diff --git a/libraries/state_db/state_db.cpp b/src/state_db.cpp similarity index 99% rename from libraries/state_db/state_db.cpp rename to src/state_db.cpp index 0ecfde6..7dc6bc7 100644 --- a/libraries/state_db/state_db.cpp +++ b/src/state_db.cpp @@ -1,9 +1,10 @@ +#include "merge_iterator.hpp" +#include "state_delta.hpp" + #include #include #include -#include -#include #include #include diff --git a/libraries/state_db/detail/state_delta.cpp b/src/state_delta.cpp similarity index 99% rename from libraries/state_db/detail/state_delta.cpp rename to src/state_delta.cpp index 67b1807..c566757 100644 --- a/libraries/state_db/detail/state_delta.cpp +++ b/src/state_delta.cpp @@ -1,4 +1,4 @@ -#include +#include "state_delta.hpp" #include @@ -355,14 +355,14 @@ std::vector< protocol::state_delta_entry > state_delta::get_delta_entries() cons entry.mutable_object_space()->set_system( db_key.space().system() ); entry.mutable_object_space()->set_zone( db_key.space().zone() ); entry.mutable_object_space()->set_id( db_key.space().id() ); - + entry.set_key( db_key.key() ); auto value = _backend->get( key ); - + // Set the optional field if not null if ( value != nullptr ) entry.set_value( *value ); - + deltas.push_back( entry ); } } diff --git a/libraries/state_db/include/koinos/state_db/detail/state_delta.hpp b/src/state_delta.hpp similarity index 100% rename from libraries/state_db/include/koinos/state_db/detail/state_delta.hpp rename to src/state_delta.hpp diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index edf6bdb..d858521 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,25 +1,18 @@ -find_package(Gperftools QUIET) -if(GPERFTOOLS_FOUND) - message(STATUS "Found gperftools; compiling tests with TCMalloc") - list(APPEND PLATFORM_SPECIFIC_LIBS tcmalloc) -endif() - include(CTest) enable_testing() -file(GLOB UNIT_TESTS "tests/*.cpp") -file(GLOB_RECURSE TEST_FIXTURES "include/*.hpp") - -koinos_parse_unit_tests(TEST_CASES ${UNIT_TESTS}) - koinos_add_test(koinos_state_db_tests - SOURCES ${UNIT_TESTS} ${TEST_FIXTURES} - TESTS ${TEST_CASES} + SOURCES + main.cpp + state_db_test.cpp ) +koinos_add_format_target(koinos_state_db_tests) + target_link_libraries(koinos_state_db_tests Koinos::proto Koinos::crypto Koinos::state_db Koinos::log Koinos::util Koinos::exception ${PLATFORM_SPECIFIC_LIBS}) target_include_directories(koinos_state_db_tests PUBLIC + ${PROJECT_SOURCE_DIR}/src # Private headers $ $ # /include ) diff --git a/tests/tests/main.cpp b/tests/main.cpp similarity index 100% rename from tests/tests/main.cpp rename to tests/main.cpp diff --git a/tests/tests/state_db_test.cpp b/tests/state_db_test.cpp similarity index 99% rename from tests/tests/state_db_test.cpp rename to tests/state_db_test.cpp index ba406ef..c89c9e6 100644 --- a/tests/tests/state_db_test.cpp +++ b/tests/state_db_test.cpp @@ -1,5 +1,8 @@ #include 
+#include "merge_iterator.hpp" +#include "state_delta.hpp" + #include #include #include @@ -7,8 +10,6 @@ #include #include #include -#include -#include #include #include #include @@ -868,7 +869,7 @@ BOOST_AUTO_TEST_CASE( get_delta_entries_test ) BOOST_CHECK_EQUAL( d_key, entries2[2].key() ); BOOST_CHECK_EQUAL( space.DebugString(), entries2[2].object_space().DebugString() ); BOOST_CHECK_EQUAL( d_val, entries2[2].value() ); - + } KOINOS_CATCH_LOG_AND_RETHROW(info) } BOOST_AUTO_TEST_CASE( rocksdb_backend_test ) diff --git a/ci/after_success.sh b/tools/ci/after_success.sh similarity index 100% rename from ci/after_success.sh rename to tools/ci/after_success.sh diff --git a/ci/build.sh b/tools/ci/build.sh similarity index 100% rename from ci/build.sh rename to tools/ci/build.sh diff --git a/ci/ccache_clang b/tools/ci/ccache_clang similarity index 100% rename from ci/ccache_clang rename to tools/ci/ccache_clang diff --git a/ci/ccache_clang++ b/tools/ci/ccache_clang++ similarity index 100% rename from ci/ccache_clang++ rename to tools/ci/ccache_clang++ diff --git a/ci/install.sh b/tools/ci/install.sh similarity index 100% rename from ci/install.sh rename to tools/ci/install.sh diff --git a/ci/test.sh b/tools/ci/test.sh similarity index 86% rename from ci/test.sh rename to tools/ci/test.sh index 60cf747..d0eb200 100755 --- a/ci/test.sh +++ b/tools/ci/test.sh @@ -3,7 +3,7 @@ set -e set -x -cd $(dirname "$0")/../build/tests +cd $TRAVIS_BUILD_DIR/build/tests if [ "$RUN_TYPE" = "test" ]; then exec ctest -j3 --output-on-failure elif [ "$RUN_TYPE" = "coverage" ]; then From 4eab8b496e10cf3b4e429931755f847e067ad67c Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Thu, 7 Dec 2023 16:37:38 -0800 Subject: [PATCH 03/26] Cleanup CMakeLists.txt --- CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b48234..6271782 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,9 +56,6 @@ koinos_add_package(koinos_log CONFIG REQUIRED) koinos_add_package(koinos_crypto CONFIG REQUIRED) koinos_add_package(koinos_util CONFIG REQUIRED) -include(GNUInstallDirs) -include(CMakePackageConfigHelpers) - add_subdirectory(src) add_subdirectory(tests) From 2371d3a5909f927aa7975a0ac731abf5d305fb3a Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Thu, 7 Dec 2023 19:31:46 -0800 Subject: [PATCH 04/26] Fix test target name for coverage --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6271782..25bfd58 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ koinos_define_version() koinos_coverage( EXECUTABLE - koinos_statedb_tests + koinos_state_db_tests EXCLUDE "tests/*" ) From fb90310c7e342a25f0689c0955e645cdfdb3c270 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Fri, 8 Dec 2023 17:15:58 -0800 Subject: [PATCH 05/26] Cleanup CMakeLists --- .clang-format | 152 +++++++++++++++++++++++++++++++++++++++++++ CMakeLists.txt | 11 +--- src/CMakeLists.txt | 6 +- tests/CMakeLists.txt | 17 +++-- 4 files changed, 169 insertions(+), 17 deletions(-) create mode 100644 .clang-format diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..3c2a490 --- /dev/null +++ b/.clang-format @@ -0,0 +1,152 @@ +--- +AlignAfterOpenBracket: Align +AlignArrayOfStructures: Right +AlignConsecutiveAssignments: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: true + PadOperators: true +AlignConsecutiveBitFields: + Enabled: true + AcrossEmptyLines: false + 
AcrossComments: false + AlignCompound: true + PadOperators: true +AlignConsecutiveDeclarations: + Enabled: false +#AlignConsecutiveMacros: Consecutive +AlignConsecutiveMacros: + Enabled: true + AcrossEmptyLines: false + AcrossComments: false +AlignEscapedNewlines: Right +AlignOperands: true +# clang-16 +#AlignTrailingComments: +# Kind: Always +# OverEmptyLines: 0 +AllowAllArgumentsOnNextLine: False +AllowAllParametersOfDeclarationOnNextLine: False +# clang-18 +#AllowBreakBeforeNoexceptSpecifier: Never +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: False +AllowShortEnumsOnASingleLine: False +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: False +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: False +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: False +BinPackParameters: False +BitFieldColonSpacing: Both +BreakBeforeBraces: Custom +BraceWrapping: + AfterCaseLabel: True + AfterClass: True + AfterControlStatement: Always + AfterEnum: True + AfterFunction: True + AfterNamespace: False + AfterObjCDeclaration: True + AfterStruct: True + AfterUnion: True + AfterExternBlock: True + BeforeCatch: True + BeforeElse: True + BeforeLambdaBody: True + BeforeWhile: True + IndentBraces: False + SplitEmptyFunction: False + SplitEmptyRecord: False + SplitEmptyNamespace: False +# clang-17 +#BracedInitializerIndentWidth: 2 +# clang-16 +#BreakAfterAttributes: Always +# clang-16 +#BreakArrays: False +BreakBeforeBinaryOperators: NonAssignment +BreakBeforeConceptDeclarations: Always +# clang-16 +#BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeTernaryOperators: True +BreakConstructorInitializers: AfterColon +BreakInheritanceList: AfterComma +BreakStringLiterals: False +ColumnLimit: 120 +CompactNamespaces: True +ContinuationIndentWidth: 2 +Cpp11BracedListStyle: True +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: Always +FixNamespaceComments: true +IndentAccessModifiers: False +IndentCaseBlocks: True +IndentCaseLabels: True +IndentExternBlock: Indent +IndentGotoLabels: False +IndentPPDirectives: AfterHash +IndentWidth: 2 +IndentWrappedFunctionNames: False +InsertBraces: False +# clang-16 +#InsertNewLineAtEOF: True +# clang-16 +#IntegerLiteralSeparator: +# Binary: 0 +# Decimal: 3 +# Hex: 0 +KeepEmptyLinesAtTheStartOfBlocks: False +LambdaBodyIndentation: Signature +Language: Cpp +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +PackConstructorInitializers: Never +PointerAlignment: Left +QualifierAlignment: Left +ReferenceAlignment: Left +ReflowComments: True +RemoveBracesLLVM: False +# clang-17 +#RemoveParentheses: False +# clang-16 +#RemoveSemicolon: False +RequiresClausePosition: OwnLine +# clang-16 +#RequiresExpressionIndentation: OuterScope +SeparateDefinitionBlocks: Always +ShortNamespaceLines: 0 +SortIncludes: CaseInsensitive +# clang-16 +#SortUsingDeclarations: LexicographicNumeric +SpaceAfterCStyleCast: False +SpaceAfterLogicalNot: False +SpaceAfterTemplateKeyword: False +SpaceAroundPointerQualifiers: Default +SpaceBeforeAssignmentOperators: True +SpaceBeforeCaseColon: False +SpaceBeforeCpp11BracedList: False +SpaceBeforeCtorInitializerColon: False +SpaceBeforeInheritanceColon: False +SpaceBeforeParens: Never +SpaceBeforeRangeBasedForLoopColon: False +SpaceBeforeSquareBrackets: False +SpaceInEmptyBlock: False +SpaceInEmptyParentheses: False +SpacesBeforeTrailingComments: 1 +SpacesInAngles: Always 
+SpacesInCStyleCastParentheses: False +SpacesInConditionalStatement: True +SpacesInContainerLiterals: True +SpacesInLineCommentPrefix: + Minimum: 1 + Maximum: -1 +SpacesInParentheses: True +SpacesInSquareBrackets: True +Standard: c++20 +TabWidth: 2 +UseTab: Never diff --git a/CMakeLists.txt b/CMakeLists.txt index 25bfd58..aea1789 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ include(FetchContent) FetchContent_Declare( koinos_cmake GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git - GIT_TAG f3d5dd68611a515837b2a038cc93365ad12e8c41 + GIT_TAG 5b72b0805bb6770d1c1462bcdf9ff0e423a5dd2c ) FetchContent_MakeAvailable(koinos_cmake) @@ -29,13 +29,6 @@ project(koinos_state_db koinos_define_version() -koinos_coverage( - EXECUTABLE - koinos_state_db_tests - EXCLUDE - "tests/*" -) - koinos_add_package(Boost CONFIG REQUIRED ADD_COMPONENTS log exception test FIND_COMPONENTS log log_setup @@ -58,5 +51,3 @@ koinos_add_package(koinos_util CONFIG REQUIRED) add_subdirectory(src) add_subdirectory(tests) - -koinos_install(TARGETS state_db) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0355756..c790914 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -34,7 +34,7 @@ target_link_libraries( Koinos::crypto RocksDB::rocksdb) -koinos_add_format_target(state_db) +koinos_add_format(TARGET state_db) target_include_directories( state_db @@ -45,9 +45,11 @@ target_include_directories( add_library(Koinos::state_db ALIAS state_db) +koinos_install(TARGETS state_db) + install( DIRECTORY ${PROJECT_SOURCE_DIR}/include DESTINATION - include/ + ${CMAKE_INSTALL_PREFIX} ) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d858521..bac6ac3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -2,17 +2,24 @@ include(CTest) enable_testing() -koinos_add_test(koinos_state_db_tests +koinos_add_test(state_db_tests SOURCES main.cpp state_db_test.cpp ) -koinos_add_format_target(koinos_state_db_tests) - -target_link_libraries(koinos_state_db_tests Koinos::proto Koinos::crypto Koinos::state_db Koinos::log Koinos::util Koinos::exception ${PLATFORM_SPECIFIC_LIBS}) -target_include_directories(koinos_state_db_tests PUBLIC +target_link_libraries(state_db_tests Koinos::proto Koinos::crypto Koinos::state_db Koinos::log Koinos::util Koinos::exception ${PLATFORM_SPECIFIC_LIBS}) +target_include_directories(state_db_tests PUBLIC ${PROJECT_SOURCE_DIR}/src # Private headers $ $ # /include ) + +koinos_add_format(TARGET state_db_tests) + +koinos_coverage( + EXECUTABLE + state_db_tests + EXCLUDE + "tests/*" +) From 6291c7d3ea7d7a44e51205513150df16fb3a1f0f Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 11 Dec 2023 16:48:35 -0800 Subject: [PATCH 06/26] Cleanup CI and respect namespaces in src --- .travis.yml | 17 +++++++---- CMakeLists.txt | 2 +- README.md | 2 +- src/CMakeLists.txt | 28 ++++++++++--------- .../state_db}/backends/backend.cpp | 0 .../state_db}/backends/iterator.cpp | 0 .../state_db}/backends/map/map_backend.cpp | 0 .../state_db}/backends/map/map_iterator.cpp | 0 .../backends/rocksdb/object_cache.cpp | 0 .../backends/rocksdb/rocksdb_backend.cpp | 0 .../backends/rocksdb/rocksdb_iterator.cpp | 0 src/{ => koinos/state_db}/merge_iterator.cpp | 2 +- src/{ => koinos/state_db}/merge_iterator.hpp | 2 +- src/{ => koinos/state_db}/state_db.cpp | 5 ++-- src/{ => koinos/state_db}/state_delta.cpp | 2 +- src/{ => koinos/state_db}/state_delta.hpp | 0 tests/CMakeLists.txt | 4 +++ tests/state_db_test.cpp | 5 ++-- tools/ci/build.sh | 3 ++ tools/ci/install.sh | 6 ---- 
tools/ci/test.sh | 3 ++ 21 files changed, 46 insertions(+), 35 deletions(-) rename src/{ => koinos/state_db}/backends/backend.cpp (100%) rename src/{ => koinos/state_db}/backends/iterator.cpp (100%) rename src/{ => koinos/state_db}/backends/map/map_backend.cpp (100%) rename src/{ => koinos/state_db}/backends/map/map_iterator.cpp (100%) rename src/{ => koinos/state_db}/backends/rocksdb/object_cache.cpp (100%) rename src/{ => koinos/state_db}/backends/rocksdb/rocksdb_backend.cpp (100%) rename src/{ => koinos/state_db}/backends/rocksdb/rocksdb_iterator.cpp (100%) rename src/{ => koinos/state_db}/merge_iterator.cpp (99%) rename src/{ => koinos/state_db}/merge_iterator.hpp (99%) rename src/{ => koinos/state_db}/state_db.cpp (99%) rename src/{ => koinos/state_db}/state_delta.cpp (99%) rename src/{ => koinos/state_db}/state_delta.hpp (100%) diff --git a/.travis.yml b/.travis.yml index 8f96854..8c7339e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,18 +7,25 @@ cache: addons: apt: packages: - - python3 - - python3-pip - - python3-setuptools + - clang + - clang-format + - llvm + - lcov + - ruby update: true jobs: include: + - os: linux + dist: jammy + env: + - RUN_TYPE=static-analysis + - MATRIX_EVAL="CC=clang && CXX=clang++" - os: linux dist: jammy env: - RUN_TYPE=coverage - - MATRIX_EVAL="CC=clang-11 && CXX=clang++-11" + - MATRIX_EVAL="CC=clang && CXX=clang++" - os: linux dist: jammy env: @@ -28,7 +35,7 @@ jobs: dist: jammy env: - RUN_TYPE=test - - MATRIX_EVAL="CC=clang-11 && CXX=clang++-11" + - MATRIX_EVAL="CC=clang && CXX=clang++" before_install: - eval "${MATRIX_EVAL}" diff --git a/CMakeLists.txt b/CMakeLists.txt index aea1789..8d6005b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ include(FetchContent) FetchContent_Declare( koinos_cmake GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git - GIT_TAG 5b72b0805bb6770d1c1462bcdf9ff0e423a5dd2c + GIT_TAG 4967f0548e3f4f555ac95494413f30ac9d0ced4d ) FetchContent_MakeAvailable(koinos_cmake) diff --git a/README.md b/README.md index 28f005a..ee311a9 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ cmake --build . --config Release --parallel You can optionally run static analysis with Clang-Tidy during the build process. Static analysis is checked in CI and is required to pass before merging pull requests. ``` -cmake -D STATIC_ANALYSIS=ON .. +cmake -D CMAKE_BUILD_TYPE=Debug -D STATIC_ANALYSIS=ON .. 
``` ### Testing diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c790914..1d43c8c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,17 +1,17 @@ add_library(state_db - state_db.cpp - state_delta.cpp - merge_iterator.cpp - backends/backend.cpp - backends/iterator.cpp - backends/map/map_backend.cpp - backends/map/map_iterator.cpp - backends/rocksdb/rocksdb_backend.cpp - backends/rocksdb/rocksdb_iterator.cpp - backends/rocksdb/object_cache.cpp - - merge_iterator.hpp - state_delta.hpp + koinos/state_db/state_db.cpp + koinos/state_db/state_delta.cpp + koinos/state_db/merge_iterator.cpp + koinos/state_db/backends/backend.cpp + koinos/state_db/backends/iterator.cpp + koinos/state_db/backends/map/map_backend.cpp + koinos/state_db/backends/map/map_iterator.cpp + koinos/state_db/backends/rocksdb/rocksdb_backend.cpp + koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp + koinos/state_db/backends/rocksdb/object_cache.cpp + + koinos/state_db/merge_iterator.hpp + koinos/state_db/state_delta.hpp ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db_types.hpp ${PROJECT_SOURCE_DIR}/include/koinos/state_db/state_db.hpp @@ -41,6 +41,8 @@ target_include_directories( PUBLIC $ $ + PRIVATE + ${PROJECT_SOURCE_DIR}/src ) add_library(Koinos::state_db ALIAS state_db) diff --git a/src/backends/backend.cpp b/src/koinos/state_db/backends/backend.cpp similarity index 100% rename from src/backends/backend.cpp rename to src/koinos/state_db/backends/backend.cpp diff --git a/src/backends/iterator.cpp b/src/koinos/state_db/backends/iterator.cpp similarity index 100% rename from src/backends/iterator.cpp rename to src/koinos/state_db/backends/iterator.cpp diff --git a/src/backends/map/map_backend.cpp b/src/koinos/state_db/backends/map/map_backend.cpp similarity index 100% rename from src/backends/map/map_backend.cpp rename to src/koinos/state_db/backends/map/map_backend.cpp diff --git a/src/backends/map/map_iterator.cpp b/src/koinos/state_db/backends/map/map_iterator.cpp similarity index 100% rename from src/backends/map/map_iterator.cpp rename to src/koinos/state_db/backends/map/map_iterator.cpp diff --git a/src/backends/rocksdb/object_cache.cpp b/src/koinos/state_db/backends/rocksdb/object_cache.cpp similarity index 100% rename from src/backends/rocksdb/object_cache.cpp rename to src/koinos/state_db/backends/rocksdb/object_cache.cpp diff --git a/src/backends/rocksdb/rocksdb_backend.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp similarity index 100% rename from src/backends/rocksdb/rocksdb_backend.cpp rename to src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp diff --git a/src/backends/rocksdb/rocksdb_iterator.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp similarity index 100% rename from src/backends/rocksdb/rocksdb_iterator.cpp rename to src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp diff --git a/src/merge_iterator.cpp b/src/koinos/state_db/merge_iterator.cpp similarity index 99% rename from src/merge_iterator.cpp rename to src/koinos/state_db/merge_iterator.cpp index 4eb37ac..cfcd5ef 100644 --- a/src/merge_iterator.cpp +++ b/src/koinos/state_db/merge_iterator.cpp @@ -1,4 +1,4 @@ -#include "merge_iterator.hpp" +#include namespace koinos::state_db::detail { diff --git a/src/merge_iterator.hpp b/src/koinos/state_db/merge_iterator.hpp similarity index 99% rename from src/merge_iterator.hpp rename to src/koinos/state_db/merge_iterator.hpp index d873afc..c2fe218 100644 --- a/src/merge_iterator.hpp +++ b/src/koinos/state_db/merge_iterator.hpp @@ -1,6 
+1,6 @@ #pragma once -#include "state_delta.hpp" +#include #include diff --git a/src/state_db.cpp b/src/koinos/state_db/state_db.cpp similarity index 99% rename from src/state_db.cpp rename to src/koinos/state_db/state_db.cpp index 7dc6bc7..97c5af8 100644 --- a/src/state_db.cpp +++ b/src/koinos/state_db/state_db.cpp @@ -1,10 +1,9 @@ -#include "merge_iterator.hpp" -#include "state_delta.hpp" - #include #include +#include #include +#include #include #include diff --git a/src/state_delta.cpp b/src/koinos/state_db/state_delta.cpp similarity index 99% rename from src/state_delta.cpp rename to src/koinos/state_db/state_delta.cpp index c566757..9c8367e 100644 --- a/src/state_delta.cpp +++ b/src/koinos/state_db/state_delta.cpp @@ -1,4 +1,4 @@ -#include "state_delta.hpp" +#include #include diff --git a/src/state_delta.hpp b/src/koinos/state_db/state_delta.hpp similarity index 100% rename from src/state_delta.hpp rename to src/koinos/state_db/state_delta.hpp diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index bac6ac3..9c388ac 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,3 +1,7 @@ +if (NOT BUILD_TESTING) + return() +endif() + include(CTest) enable_testing() diff --git a/tests/state_db_test.cpp b/tests/state_db_test.cpp index c89c9e6..75a095c 100644 --- a/tests/state_db_test.cpp +++ b/tests/state_db_test.cpp @@ -1,8 +1,5 @@ #include -#include "merge_iterator.hpp" -#include "state_delta.hpp" - #include #include #include @@ -10,7 +7,9 @@ #include #include #include +#include #include +#include #include #include #include diff --git a/tools/ci/build.sh b/tools/ci/build.sh index f503915..b3aef01 100755 --- a/tools/ci/build.sh +++ b/tools/ci/build.sh @@ -12,4 +12,7 @@ if [ "$RUN_TYPE" = "test" ]; then elif [ "$RUN_TYPE" = "coverage" ]; then cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. cmake --build . --config Debug --parallel 3 --target coverage +elif [ "$RUN_TYPE" = "static-analysis" ]; then + cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON .. + cmake --build . --config Debug --parallel 3 fi diff --git a/tools/ci/install.sh b/tools/ci/install.sh index 4024a3f..4371d4e 100755 --- a/tools/ci/install.sh +++ b/tools/ci/install.sh @@ -1,11 +1,5 @@ #!/bin/bash -sudo apt-get install -yq --allow-downgrades libc6=2.31-0ubuntu9.2 libc6-dev=2.31-0ubuntu9.2 - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --allow-downgrades --allow-remove-essential --allow-change-held-packages install clang-11 llvm-11 -o Debug::pkgProblemResolver=yes - if [ "$RUN_TYPE" = "coverage" ]; then - sudo apt-get install -y lcov ruby valgrind sudo gem install coveralls-lcov fi - -pip3 install --user dataclasses-json Jinja2 importlib_resources pluginbase gitpython diff --git a/tools/ci/test.sh b/tools/ci/test.sh index d0eb200..53d7a1c 100755 --- a/tools/ci/test.sh +++ b/tools/ci/test.sh @@ -6,6 +6,9 @@ set -x cd $TRAVIS_BUILD_DIR/build/tests if [ "$RUN_TYPE" = "test" ]; then exec ctest -j3 --output-on-failure + + cd $TRAVIS_BUILD_DIR/build + cmake .. build . 
--config Release --parallel 3 --target format.check elif [ "$RUN_TYPE" = "coverage" ]; then exec valgrind --error-exitcode=1 --leak-check=yes ./koinos_state_db_tests fi From 760df69defe8363e040de6a536e5f9175b7e9575 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 11 Dec 2023 16:50:56 -0800 Subject: [PATCH 07/26] Fix source formatting and enable clang-16 format options --- .clang-format | 38 +- include/koinos/state_db/backends/backend.hpp | 68 +- .../koinos/state_db/backends/exceptions.hpp | 2 +- include/koinos/state_db/backends/iterator.hpp | 60 +- .../state_db/backends/map/map_backend.hpp | 55 +- .../state_db/backends/map/map_iterator.hpp | 34 +- .../state_db/backends/rocksdb/exceptions.hpp | 2 +- .../backends/rocksdb/object_cache.hpp | 62 +- .../backends/rocksdb/rocksdb_backend.hpp | 75 +- .../backends/rocksdb/rocksdb_iterator.hpp | 53 +- include/koinos/state_db/backends/types.hpp | 2 +- include/koinos/state_db/state_db.hpp | 758 ++--- include/koinos/state_db/state_db_types.hpp | 4 +- src/koinos/state_db/backends/backend.cpp | 24 +- src/koinos/state_db/backends/iterator.cpp | 44 +- .../state_db/backends/map/map_backend.cpp | 37 +- .../state_db/backends/map/map_iterator.cpp | 36 +- .../backends/rocksdb/object_cache.cpp | 75 +- .../backends/rocksdb/rocksdb_backend.cpp | 764 ++--- .../backends/rocksdb/rocksdb_iterator.cpp | 165 +- src/koinos/state_db/merge_iterator.cpp | 393 +-- src/koinos/state_db/merge_iterator.hpp | 225 +- src/koinos/state_db/state_db.cpp | 1852 ++++++------ src/koinos/state_db/state_delta.cpp | 494 +-- src/koinos/state_db/state_delta.hpp | 103 +- tests/main.cpp | 2 +- tests/state_db_test.cpp | 2653 +++++++++-------- 27 files changed, 4195 insertions(+), 3885 deletions(-) diff --git a/.clang-format b/.clang-format index 3c2a490..6767e56 100644 --- a/.clang-format +++ b/.clang-format @@ -15,17 +15,15 @@ AlignConsecutiveBitFields: PadOperators: true AlignConsecutiveDeclarations: Enabled: false -#AlignConsecutiveMacros: Consecutive AlignConsecutiveMacros: Enabled: true AcrossEmptyLines: false AcrossComments: false AlignEscapedNewlines: Right AlignOperands: true -# clang-16 -#AlignTrailingComments: -# Kind: Always -# OverEmptyLines: 0 +AlignTrailingComments: + Kind: Always + OverEmptyLines: 0 AllowAllArgumentsOnNextLine: False AllowAllParametersOfDeclarationOnNextLine: False # clang-18 @@ -65,14 +63,11 @@ BraceWrapping: SplitEmptyNamespace: False # clang-17 #BracedInitializerIndentWidth: 2 -# clang-16 -#BreakAfterAttributes: Always -# clang-16 -#BreakArrays: False +BreakAfterAttributes: Always +BreakArrays: False BreakBeforeBinaryOperators: NonAssignment BreakBeforeConceptDeclarations: Always -# clang-16 -#BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeInlineASMColon: OnlyMultiline BreakBeforeTernaryOperators: True BreakConstructorInitializers: AfterColon BreakInheritanceList: AfterComma @@ -93,13 +88,11 @@ IndentPPDirectives: AfterHash IndentWidth: 2 IndentWrappedFunctionNames: False InsertBraces: False -# clang-16 -#InsertNewLineAtEOF: True -# clang-16 -#IntegerLiteralSeparator: -# Binary: 0 -# Decimal: 3 -# Hex: 0 +InsertNewlineAtEOF: True +IntegerLiteralSeparator: + Binary: 0 + Decimal: 3 + Hex: 0 KeepEmptyLinesAtTheStartOfBlocks: False LambdaBodyIndentation: Signature Language: Cpp @@ -113,16 +106,13 @@ ReflowComments: True RemoveBracesLLVM: False # clang-17 #RemoveParentheses: False -# clang-16 -#RemoveSemicolon: False +RemoveSemicolon: False RequiresClausePosition: OwnLine -# clang-16 -#RequiresExpressionIndentation: OuterScope 
+RequiresExpressionIndentation: OuterScope SeparateDefinitionBlocks: Always ShortNamespaceLines: 0 SortIncludes: CaseInsensitive -# clang-16 -#SortUsingDeclarations: LexicographicNumeric +SortUsingDeclarations: LexicographicNumeric SpaceAfterCStyleCast: False SpaceAfterLogicalNot: False SpaceAfterTemplateKeyword: False diff --git a/include/koinos/state_db/backends/backend.hpp b/include/koinos/state_db/backends/backend.hpp index e579d56..756d0d8 100644 --- a/include/koinos/state_db/backends/backend.hpp +++ b/include/koinos/state_db/backends/backend.hpp @@ -9,52 +9,52 @@ namespace koinos::state_db::backends { class abstract_backend { - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - using size_type = detail::size_type; +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + using size_type = detail::size_type; - abstract_backend(); - virtual ~abstract_backend() {}; + abstract_backend(); + virtual ~abstract_backend(){}; - virtual iterator begin() = 0; - virtual iterator end() = 0; + virtual iterator begin() = 0; + virtual iterator end() = 0; - virtual void put( const key_type& k, const value_type& v ) = 0; - virtual const value_type* get( const key_type& ) const = 0; - virtual void erase( const key_type& k ) = 0; - virtual void clear() = 0; + virtual void put( const key_type& k, const value_type& v ) = 0; + virtual const value_type* get( const key_type& ) const = 0; + virtual void erase( const key_type& k ) = 0; + virtual void clear() = 0; - virtual size_type size() const = 0; - bool empty() const; + virtual size_type size() const = 0; + bool empty() const; - virtual iterator find( const key_type& k ) = 0; - virtual iterator lower_bound( const key_type& k ) = 0; + virtual iterator find( const key_type& k ) = 0; + virtual iterator lower_bound( const key_type& k ) = 0; - size_type revision() const; - void set_revision( size_type ); + size_type revision() const; + void set_revision( size_type ); - const crypto::multihash& id() const; - void set_id( const crypto::multihash& ); + const crypto::multihash& id() const; + void set_id( const crypto::multihash& ); - const crypto::multihash& merkle_root() const; - void set_merkle_root( const crypto::multihash& ); + const crypto::multihash& merkle_root() const; + void set_merkle_root( const crypto::multihash& ); - const protocol::block_header& block_header() const; - void set_block_header( const protocol::block_header& ); + const protocol::block_header& block_header() const; + void set_block_header( const protocol::block_header& ); - virtual void start_write_batch() = 0; - virtual void end_write_batch() = 0; + virtual void start_write_batch() = 0; + virtual void end_write_batch() = 0; - virtual void store_metadata() = 0; + virtual void store_metadata() = 0; - virtual std::shared_ptr< abstract_backend > clone() const = 0; + virtual std::shared_ptr< abstract_backend > clone() const = 0; - private: - size_type _revision = 0; - crypto::multihash _id; - crypto::multihash _merkle_root; - protocol::block_header _header; +private: + size_type _revision = 0; + crypto::multihash _id; + crypto::multihash _merkle_root; + protocol::block_header _header; }; -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/include/koinos/state_db/backends/exceptions.hpp b/include/koinos/state_db/backends/exceptions.hpp index 2cc90a7..5181928 100644 --- a/include/koinos/state_db/backends/exceptions.hpp +++ b/include/koinos/state_db/backends/exceptions.hpp @@ -7,4 +7,4 @@ 
KOINOS_DECLARE_DERIVED_EXCEPTION( backend_exception, state_db_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( iterator_exception, state_db_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( internal_exception, state_db_exception ); -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/include/koinos/state_db/backends/iterator.hpp b/include/koinos/state_db/backends/iterator.hpp index c3c96e1..6a33ab6 100644 --- a/include/koinos/state_db/backends/iterator.hpp +++ b/include/koinos/state_db/backends/iterator.hpp @@ -10,53 +10,53 @@ class iterator; class abstract_iterator { - public: - using key_type = detail::key_type; - using value_type = detail::value_type; +public: + using key_type = detail::key_type; + using value_type = detail::value_type; - virtual ~abstract_iterator() {}; + virtual ~abstract_iterator(){}; - virtual const value_type& operator*() const = 0; + virtual const value_type& operator*() const = 0; - virtual const key_type& key() const = 0; + virtual const key_type& key() const = 0; - virtual abstract_iterator& operator++() = 0; - virtual abstract_iterator& operator--() = 0; + virtual abstract_iterator& operator++() = 0; + virtual abstract_iterator& operator--() = 0; - private: - friend class iterator; +private: + friend class iterator; - virtual bool valid() const = 0; - virtual std::unique_ptr< abstract_iterator > copy() const = 0; + virtual bool valid() const = 0; + virtual std::unique_ptr< abstract_iterator > copy() const = 0; }; class iterator final { - public: - using key_type = detail::key_type; - using value_type = detail::value_type; +public: + using key_type = detail::key_type; + using value_type = detail::value_type; - iterator( std::unique_ptr< abstract_iterator > ); - iterator( const iterator& other ); - iterator( iterator&& other ); + iterator( std::unique_ptr< abstract_iterator > ); + iterator( const iterator& other ); + iterator( iterator&& other ); - const value_type& operator*() const; + const value_type& operator*() const; - const key_type& key() const; - const value_type& value() const; + const key_type& key() const; + const value_type& value() const; - iterator& operator++(); - iterator& operator--(); + iterator& operator++(); + iterator& operator--(); - iterator& operator=( iterator&& other ); + iterator& operator=( iterator&& other ); - friend bool operator==( const iterator& x, const iterator& y ); - friend bool operator!=( const iterator& x, const iterator& y ); + friend bool operator==( const iterator& x, const iterator& y ); + friend bool operator!=( const iterator& x, const iterator& y ); - private: - bool valid() const; +private: + bool valid() const; - std::unique_ptr< abstract_iterator > _itr; + std::unique_ptr< abstract_iterator > _itr; }; -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/include/koinos/state_db/backends/map/map_backend.hpp b/include/koinos/state_db/backends/map/map_backend.hpp index 83a4de1..5c2454e 100644 --- a/include/koinos/state_db/backends/map/map_backend.hpp +++ b/include/koinos/state_db/backends/map/map_backend.hpp @@ -5,41 +5,42 @@ namespace koinos::state_db::backends::map { -class map_backend final : public abstract_backend { - public: - using key_type = abstract_backend::key_type; - using value_type = abstract_backend::value_type; - using size_type = abstract_backend::size_type; +class map_backend final: public abstract_backend +{ +public: + using key_type = abstract_backend::key_type; + using value_type = abstract_backend::value_type; + using size_type 
= abstract_backend::size_type; - map_backend(); - virtual ~map_backend() override; + map_backend(); + virtual ~map_backend() override; - // Iterators - virtual iterator begin() noexcept override; - virtual iterator end() noexcept override; + // Iterators + virtual iterator begin() noexcept override; + virtual iterator end() noexcept override; - // Modifiers - virtual void put( const key_type& k, const value_type& v ) override; - virtual const value_type* get( const key_type& ) const override; - virtual void erase( const key_type& k ) override; - virtual void clear() noexcept override; + // Modifiers + virtual void put( const key_type& k, const value_type& v ) override; + virtual const value_type* get( const key_type& ) const override; + virtual void erase( const key_type& k ) override; + virtual void clear() noexcept override; - virtual size_type size() const noexcept override; + virtual size_type size() const noexcept override; - // Lookup - virtual iterator find( const key_type& k ) override; - virtual iterator lower_bound( const key_type& k ) override; + // Lookup + virtual iterator find( const key_type& k ) override; + virtual iterator lower_bound( const key_type& k ) override; - virtual void start_write_batch() override; - virtual void end_write_batch() override; + virtual void start_write_batch() override; + virtual void end_write_batch() override; - virtual void store_metadata() override; + virtual void store_metadata() override; - virtual std::shared_ptr< abstract_backend > clone() const override; + virtual std::shared_ptr< abstract_backend > clone() const override; - private: - std::map< key_type, value_type > _map; - protocol::block_header _header; +private: + std::map< key_type, value_type > _map; + protocol::block_header _header; }; -} // koinos::state_db::backends::map +} // namespace koinos::state_db::backends::map diff --git a/include/koinos/state_db/backends/map/map_iterator.hpp b/include/koinos/state_db/backends/map/map_iterator.hpp index 854c492..83f606e 100644 --- a/include/koinos/state_db/backends/map/map_iterator.hpp +++ b/include/koinos/state_db/backends/map/map_iterator.hpp @@ -8,29 +8,29 @@ namespace koinos::state_db::backends::map { class map_backend; -class map_iterator final : public abstract_iterator +class map_iterator final: public abstract_iterator { - public: - using value_type = abstract_iterator::value_type; - using map_impl = std::map< detail::key_type, detail::value_type >; - using iterator_impl = map_impl::iterator; +public: + using value_type = abstract_iterator::value_type; + using map_impl = std::map< detail::key_type, detail::value_type >; + using iterator_impl = map_impl::iterator; - map_iterator( std::unique_ptr< iterator_impl > itr, const map_impl& map ); - ~map_iterator(); + map_iterator( std::unique_ptr< iterator_impl > itr, const map_impl& map ); + ~map_iterator(); - virtual const value_type& operator*() const override; + virtual const value_type& operator*() const override; - virtual const key_type& key() const override; + virtual const key_type& key() const override; - virtual abstract_iterator& operator++() override; - virtual abstract_iterator& operator--() override; + virtual abstract_iterator& operator++() override; + virtual abstract_iterator& operator--() override; - private: - virtual bool valid() const override; - virtual std::unique_ptr< abstract_iterator > copy() const override; +private: + virtual bool valid() const override; + virtual std::unique_ptr< abstract_iterator > copy() const override; - std::unique_ptr< iterator_impl > 
_itr; - const map_impl& _map; + std::unique_ptr< iterator_impl > _itr; + const map_impl& _map; }; -} // koinos::state_db::backends::map +} // namespace koinos::state_db::backends::map diff --git a/include/koinos/state_db/backends/rocksdb/exceptions.hpp b/include/koinos/state_db/backends/rocksdb/exceptions.hpp index 5205705..a226c68 100644 --- a/include/koinos/state_db/backends/rocksdb/exceptions.hpp +++ b/include/koinos/state_db/backends/rocksdb/exceptions.hpp @@ -13,4 +13,4 @@ KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_write_exception, rocksdb_backend_excep KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_session_in_progress, rocksdb_backend_exception ); KOINOS_DECLARE_DERIVED_EXCEPTION( rocksdb_internal_exception, rocksdb_backend_exception ); -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/object_cache.hpp b/include/koinos/state_db/backends/rocksdb/object_cache.hpp index 42cbb60..dc065b4 100644 --- a/include/koinos/state_db/backends/rocksdb/object_cache.hpp +++ b/include/koinos/state_db/backends/rocksdb/object_cache.hpp @@ -14,39 +14,33 @@ namespace koinos::state_db::backends::rocksdb { class object_cache { - public: - using key_type = detail::key_type; - using value_type = detail::value_type; - - private: - using lru_list_type = std::list< key_type >; - using value_map_type = - std::map< - key_type, - std::pair< - std::shared_ptr< const value_type >, - typename lru_list_type::iterator - > - >; - - lru_list_type _lru_list; - value_map_type _object_map; - std::size_t _cache_size = 0; - const std::size_t _cache_max_size; - std::mutex _mutex; - - public: - object_cache( std::size_t size ); - ~object_cache(); - - std::pair< bool, std::shared_ptr< const value_type > > get( const key_type& k ); - std::shared_ptr< const value_type > put( const key_type& k, std::shared_ptr< const value_type > v ); - - void remove( const key_type& k ); - - void clear(); - - std::mutex& get_mutex(); +public: + using key_type = detail::key_type; + using value_type = detail::value_type; + +private: + using lru_list_type = std::list< key_type >; + using value_map_type = + std::map< key_type, std::pair< std::shared_ptr< const value_type >, typename lru_list_type::iterator > >; + + lru_list_type _lru_list; + value_map_type _object_map; + std::size_t _cache_size = 0; + const std::size_t _cache_max_size; + std::mutex _mutex; + +public: + object_cache( std::size_t size ); + ~object_cache(); + + std::pair< bool, std::shared_ptr< const value_type > > get( const key_type& k ); + std::shared_ptr< const value_type > put( const key_type& k, std::shared_ptr< const value_type > v ); + + void remove( const key_type& k ); + + void clear(); + + std::mutex& get_mutex(); }; -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp index 5768d0d..6dc6500 100644 --- a/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp +++ b/include/koinos/state_db/backends/rocksdb/rocksdb_backend.hpp @@ -13,54 +13,55 @@ namespace koinos::state_db::backends::rocksdb { -class rocksdb_backend final : public abstract_backend { - public: - using key_type = abstract_backend::key_type; - using value_type = abstract_backend::value_type; - using size_type = abstract_backend::size_type; +class rocksdb_backend final: public abstract_backend +{ +public: + using key_type = 
abstract_backend::key_type; + using value_type = abstract_backend::value_type; + using size_type = abstract_backend::size_type; - rocksdb_backend(); - ~rocksdb_backend(); + rocksdb_backend(); + ~rocksdb_backend(); - void open( const std::filesystem::path& p ); - void close(); - void flush(); + void open( const std::filesystem::path& p ); + void close(); + void flush(); - virtual void start_write_batch() override; - virtual void end_write_batch() override; + virtual void start_write_batch() override; + virtual void end_write_batch() override; - // Iterators - virtual iterator begin() override; - virtual iterator end() override; + // Iterators + virtual iterator begin() override; + virtual iterator end() override; - // Modifiers - virtual void put( const key_type& k, const value_type& v ) override; - virtual const value_type* get( const key_type& ) const override; - virtual void erase( const key_type& k ) override; - virtual void clear() override; + // Modifiers + virtual void put( const key_type& k, const value_type& v ) override; + virtual const value_type* get( const key_type& ) const override; + virtual void erase( const key_type& k ) override; + virtual void clear() override; - virtual size_type size() const override; + virtual size_type size() const override; - // Lookup - virtual iterator find( const key_type& k ) override; - virtual iterator lower_bound( const key_type& k ) override; + // Lookup + virtual iterator find( const key_type& k ) override; + virtual iterator lower_bound( const key_type& k ) override; - virtual void store_metadata() override; + virtual void store_metadata() override; - virtual std::shared_ptr< abstract_backend > clone() const override; + virtual std::shared_ptr< abstract_backend > clone() const override; - private: - void load_metadata(); +private: + void load_metadata(); - using column_handles = std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > >; + using column_handles = std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > >; - std::shared_ptr< ::rocksdb::DB > _db; - std::optional< ::rocksdb::WriteBatch > _write_batch; - column_handles _handles; - ::rocksdb::WriteOptions _wopts; - std::shared_ptr< ::rocksdb::ReadOptions > _ropts; - mutable std::shared_ptr< object_cache > _cache; - size_type _size = 0; + std::shared_ptr< ::rocksdb::DB > _db; + std::optional< ::rocksdb::WriteBatch > _write_batch; + column_handles _handles; + ::rocksdb::WriteOptions _wopts; + std::shared_ptr< ::rocksdb::ReadOptions > _ropts; + mutable std::shared_ptr< object_cache > _cache; + size_type _size = 0; }; -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp b/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp index 9de070c..194654a 100644 --- a/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp +++ b/include/koinos/state_db/backends/rocksdb/rocksdb_iterator.hpp @@ -11,41 +11,40 @@ namespace koinos::state_db::backends::rocksdb { class rocksdb_backend; -class rocksdb_iterator final : public abstract_iterator +class rocksdb_iterator final: public abstract_iterator { - public: - using value_type = abstract_iterator::value_type; +public: + using value_type = abstract_iterator::value_type; - rocksdb_iterator( - std::shared_ptr< ::rocksdb::DB > db, - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, - std::shared_ptr< const ::rocksdb::ReadOptions > opts, - std::shared_ptr< object_cache > cache ); - rocksdb_iterator( const 
rocksdb_iterator& other ); - virtual ~rocksdb_iterator() override; + rocksdb_iterator( std::shared_ptr< ::rocksdb::DB > db, + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, + std::shared_ptr< const ::rocksdb::ReadOptions > opts, + std::shared_ptr< object_cache > cache ); + rocksdb_iterator( const rocksdb_iterator& other ); + virtual ~rocksdb_iterator() override; - virtual const value_type& operator*() const override; + virtual const value_type& operator*() const override; - virtual const key_type& key() const override; + virtual const key_type& key() const override; - virtual abstract_iterator& operator++() override; - virtual abstract_iterator& operator--() override; + virtual abstract_iterator& operator++() override; + virtual abstract_iterator& operator--() override; - private: - friend class rocksdb_backend; +private: + friend class rocksdb_backend; - virtual bool valid() const override; - virtual std::unique_ptr< abstract_iterator > copy() const override; + virtual bool valid() const override; + virtual std::unique_ptr< abstract_iterator > copy() const override; - void update_cache_value() const; + void update_cache_value() const; - std::shared_ptr< ::rocksdb::DB > _db; - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > _handle; - std::unique_ptr< ::rocksdb::Iterator > _iter; - std::shared_ptr< const ::rocksdb::ReadOptions > _opts; - mutable std::shared_ptr< object_cache > _cache; - mutable std::shared_ptr< const value_type > _cache_value; - mutable std::shared_ptr< const key_type > _key; + std::shared_ptr< ::rocksdb::DB > _db; + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > _handle; + std::unique_ptr< ::rocksdb::Iterator > _iter; + std::shared_ptr< const ::rocksdb::ReadOptions > _opts; + mutable std::shared_ptr< object_cache > _cache; + mutable std::shared_ptr< const value_type > _cache_value; + mutable std::shared_ptr< const key_type > _key; }; -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/include/koinos/state_db/backends/types.hpp b/include/koinos/state_db/backends/types.hpp index 216cda2..2591706 100644 --- a/include/koinos/state_db/backends/types.hpp +++ b/include/koinos/state_db/backends/types.hpp @@ -9,4 +9,4 @@ using key_type = std::string; using value_type = std::string; using size_type = uint64_t; -} // koinos::state_db::backends::detail +} // namespace koinos::state_db::backends::detail diff --git a/include/koinos/state_db/state_db.hpp b/include/koinos/state_db/state_db.hpp index f076b97..2bef39b 100644 --- a/include/koinos/state_db/state_db.hpp +++ b/include/koinos/state_db/state_db.hpp @@ -9,8 +9,8 @@ #include #include #include -#include #include +#include #include namespace koinos::state_db { @@ -21,19 +21,19 @@ class database_impl; class state_node_impl; class anonymous_state_node_impl; -} // detail +} // namespace detail class abstract_state_node; class anonymous_state_node; -using abstract_state_node_ptr = std::shared_ptr< abstract_state_node >; +using abstract_state_node_ptr = std::shared_ptr< abstract_state_node >; using anonymous_state_node_ptr = std::shared_ptr< anonymous_state_node >; enum class fork_resolution_algorithm { - fifo, - block_time, - pob + fifo, + block_time, + pob }; /** @@ -41,136 +41,140 @@ enum class fork_resolution_algorithm */ class abstract_state_node { - public: - abstract_state_node(); - virtual ~abstract_state_node(); - - /** - * Fetch an object if one exists. 
- * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - args.key is copied into result.key - */ - const object_value* get_object( const object_space& space, const object_key& key ) const; - - /** - * Get the next object. - * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - Found key is written into result - */ - std::pair< const object_value*, const object_key > get_next_object( const object_space& space, const object_key& key ) const; - - /** - * Get the previous object. - * - * - Size of the object is written into result.size - * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough - * - If buf is too small, buf is unchanged, however result is still updated - * - Found key is written into result - */ - std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, const object_key& key ) const; - - /** - * Write an object into the state_node. - * - * - Fail if node is not writable. - * - If object exists, object is overwritten. - */ - int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); - - /** - * Remove an object from the state_node - */ - int64_t remove_object( const object_space& space, const object_key& key ); - - /** - * Return true if the node is writable. - */ - bool is_finalized() const; - - /** - * Return the merkle root of writes on this state node - */ - crypto::multihash merkle_root() const; - - /** - * Returns the state delta entries associated with this state node - */ - std::vector< protocol::state_delta_entry > get_delta_entries() const; - - /** - * Returns an anonymous state node with this node as its parent. - */ - anonymous_state_node_ptr create_anonymous_node(); - - virtual const state_node_id& id() const = 0; - virtual const state_node_id& parent_id() const = 0; - virtual uint64_t revision() const = 0; - virtual abstract_state_node_ptr parent() const = 0; - virtual const protocol::block_header& block_header() const = 0; - - friend class detail::database_impl; - - protected: - virtual std::shared_ptr< abstract_state_node > shared_from_derived() = 0; - - std::unique_ptr< detail::state_node_impl > _impl; +public: + abstract_state_node(); + virtual ~abstract_state_node(); + + /** + * Fetch an object if one exists. + * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - args.key is copied into result.key + */ + const object_value* get_object( const object_space& space, const object_key& key ) const; + + /** + * Get the next object. + * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - Found key is written into result + */ + std::pair< const object_value*, const object_key > get_next_object( const object_space& space, + const object_key& key ) const; + + /** + * Get the previous object. 
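A minimal sketch of how the get_next_object() contract documented here might be used to walk one object space. The helper name is invented, and it assumes a default-constructed (empty) key sorts before every stored key and that a null value pointer marks the end of the space; none of that is asserted by the patch itself.

  #include <koinos/state_db/state_db.hpp>

  // Hypothetical helper, not part of this patch.
  void dump_space( const koinos::state_db::abstract_state_node_ptr& node,
                   const koinos::state_db::object_space& space )
  {
    koinos::state_db::object_key key; // assumed to start before the first entry
    for( ;; )
    {
      auto [ value, next_key ] = node->get_next_object( space, key );
      if( !value )
        break; // assumed: no further objects in this space

      // ... inspect next_key / *value here ...
      key = next_key;
    }
  }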
+ * + * - Size of the object is written into result.size + * - Object's value is copied into args.buf, provided buf != nullptr and buf_size is large enough + * - If buf is too small, buf is unchanged, however result is still updated + * - Found key is written into result + */ + std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, + const object_key& key ) const; + + /** + * Write an object into the state_node. + * + * - Fail if node is not writable. + * - If object exists, object is overwritten. + */ + int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); + + /** + * Remove an object from the state_node + */ + int64_t remove_object( const object_space& space, const object_key& key ); + + /** + * Return true if the node is writable. + */ + bool is_finalized() const; + + /** + * Return the merkle root of writes on this state node + */ + crypto::multihash merkle_root() const; + + /** + * Returns the state delta entries associated with this state node + */ + std::vector< protocol::state_delta_entry > get_delta_entries() const; + + /** + * Returns an anonymous state node with this node as its parent. + */ + anonymous_state_node_ptr create_anonymous_node(); + + virtual const state_node_id& id() const = 0; + virtual const state_node_id& parent_id() const = 0; + virtual uint64_t revision() const = 0; + virtual abstract_state_node_ptr parent() const = 0; + virtual const protocol::block_header& block_header() const = 0; + + friend class detail::database_impl; + +protected: + virtual std::shared_ptr< abstract_state_node > shared_from_derived() = 0; + + std::unique_ptr< detail::state_node_impl > _impl; }; -class anonymous_state_node final : public abstract_state_node, public std::enable_shared_from_this< anonymous_state_node > +class anonymous_state_node final: public abstract_state_node, + public std::enable_shared_from_this< anonymous_state_node > { - public: - anonymous_state_node(); - ~anonymous_state_node(); +public: + anonymous_state_node(); + ~anonymous_state_node(); - const state_node_id& id() const override; - const state_node_id& parent_id() const override; - uint64_t revision() const override; - abstract_state_node_ptr parent() const override; - const protocol::block_header& block_header() const override; + const state_node_id& id() const override; + const state_node_id& parent_id() const override; + uint64_t revision() const override; + abstract_state_node_ptr parent() const override; + const protocol::block_header& block_header() const override; - void commit(); - void reset(); + void commit(); + void reset(); - friend class abstract_state_node; + friend class abstract_state_node; - protected: - std::shared_ptr< abstract_state_node > shared_from_derived()override; +protected: + std::shared_ptr< abstract_state_node > shared_from_derived() override; - private: - abstract_state_node_ptr _parent; +private: + abstract_state_node_ptr _parent; }; /** * Allows querying the database at a particular checkpoint. 
*/ -class state_node final : public abstract_state_node, public std::enable_shared_from_this< state_node > +class state_node final: public abstract_state_node, + public std::enable_shared_from_this< state_node > { - public: - state_node(); - ~state_node(); - - const state_node_id& id() const override; - const state_node_id& parent_id() const override; - uint64_t revision() const override; - abstract_state_node_ptr parent() const override; - const protocol::block_header& block_header() const override; - - protected: - std::shared_ptr< abstract_state_node > shared_from_derived()override; +public: + state_node(); + ~state_node(); + + const state_node_id& id() const override; + const state_node_id& parent_id() const override; + uint64_t revision() const override; + abstract_state_node_ptr parent() const override; + const protocol::block_header& block_header() const override; + +protected: + std::shared_ptr< abstract_state_node > shared_from_derived() override; }; -using state_node_ptr = std::shared_ptr< state_node >; -using genesis_init_function = std::function< void( state_node_ptr ) >; -using fork_list = std::vector< state_node_ptr >; +using state_node_ptr = std::shared_ptr< state_node >; +using genesis_init_function = std::function< void( state_node_ptr ) >; +using fork_list = std::vector< state_node_ptr >; using state_node_comparator_function = std::function< state_node_ptr( fork_list&, state_node_ptr, state_node_ptr ) >; -using shared_lock_ptr = std::shared_ptr< const std::shared_lock< std::shared_mutex > >; -using unique_lock_ptr = std::shared_ptr< const std::unique_lock< std::shared_mutex > >; +using shared_lock_ptr = std::shared_ptr< const std::shared_lock< std::shared_mutex > >; +using unique_lock_ptr = std::shared_ptr< const std::unique_lock< std::shared_mutex > >; state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ); @@ -211,253 +215,273 @@ state_node_ptr pob_comparator( fork_list& forks, state_node_ptr current_head, st */ class database final { - public: - database(); - ~database(); - - shared_lock_ptr get_shared_lock() const; - - unique_lock_ptr get_unique_lock() const; - - /** - * Open the database. - */ - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ); - - /** - * Open the database. - */ - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ); - - /** - * Close the database. - */ - void close( const unique_lock_ptr& lock ); - - /** - * Reset the database. - */ - void reset( const unique_lock_ptr& lock ); - - /** - * Get an ancestor of a node at a particular revision - */ - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const; - - /** - * Get an ancestor of a node at a particular revision - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. 
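A minimal sketch of opening the database with the comparator-based fork resolution declared above. The path and the empty genesis initializer are placeholders, not values from this repository.

  #include <filesystem>
  #include <koinos/state_db/state_db.hpp>

  // Hypothetical start-up call, not part of this patch.
  void open_database( koinos::state_db::database& db )
  {
    auto lock = db.get_unique_lock();

    db.open( std::filesystem::path{ "state" }, // placeholder path
             []( koinos::state_db::state_node_ptr genesis )
             {
               // write initial objects into the genesis node here
             },
             koinos::state_db::fork_resolution_algorithm::fifo,
             lock );
  }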
- */ - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const; - - /** - * Get the state_node for the given state_node_id. - * - * Return an empty pointer if no node for the given id exists. - */ - state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; - - /** - * Get the state_node for the given state_node_id. - * - * Return an empty pointer if no node for the given id exists. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; - - /** - * Create a writable state_node. - * - * - If parent_id refers to a writable node, fail. - * - Otherwise, return a new writable node. - * - Writing to the returned node will not modify the parent node. - * - * If the parent is subsequently discarded, database preserves - * as much of the parent's state storage as necessary to continue - * to serve queries on any (non-discarded) children. A discarded - * parent node's state may internally be merged into a child's - * state storage area, allowing the parent's state storage area - * to be freed. This merge may occur immediately, or it may be - * deferred or parallelized. - */ - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - - /** - * Create a writable state_node. - * - * - If parent_id refers to a writable node, fail. - * - Otherwise, return a new writable node. - * - Writing to the returned node will not modify the parent node. - * - * If the parent is subsequently discarded, database preserves - * as much of the parent's state storage as necessary to continue - * to serve queries on any (non-discarded) children. A discarded - * parent node's state may internally be merged into a child's - * state storage area, allowing the parent's state storage area - * to be freed. This merge may occur immediately, or it may be - * deferred or parallelized. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - - /** - * Clone a node with a new id and block header. - * - * Cannot clone a finalized node. - */ - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - - /** - * Clone a node with a new id and block header. - * - * Cannot clone a finalized node. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. 
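A hedged usage sketch of the shared-lock read path declared in this header (get_shared_lock(), get_head(), get_object()). The helper name and the optional-based return are illustrative only.

  #include <optional>
  #include <koinos/state_db/state_db.hpp>

  // Hypothetical read path, not part of this patch: copy one object out of
  // head while the shared lock is still held.
  std::optional< koinos::state_db::object_value >
  read_head_object( koinos::state_db::database& db,
                    const koinos::state_db::object_space& space,
                    const koinos::state_db::object_key& key )
  {
    auto lock = db.get_shared_lock(); // released when `lock` goes out of scope
    auto head = db.get_head( lock );  // only finalized nodes are eligible to be head

    if( !head )
      return std::nullopt;

    if( const auto* value = head->get_object( space, key ) )
      return *value;

    return std::nullopt;
  }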
- */ - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - - /** - * Finalize a node. The node will no longer be writable. - */ - void finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ); - - /** - * Finalize a node. The node will no longer be writable. - */ - void finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Discard the node, it can no longer be used. - * - * If the node has any children, they too will be deleted because - * there will no longer exist a path from root to those nodes. - * - * This will fail if the node you are deleting would cause the - * current head node to be delted. - */ - void discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ); - - /** - * Discard the node, it can no longer be used. - * - * If the node has any children, they too will be deleted because - * there will no longer exist a path from root to those nodes. - * - * This will fail if the node you are deleting would cause the - * current head node to be delted. - */ - void discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Squash the node in to the root state, committing it. - * Branching state between this node and its ancestor will be discarded - * and no longer accesible. - * - * It is the responsiblity of the caller to ensure no readers or writers - * are accessing affected nodes by this call. - * - * TODO: Implement thread safety within commit node to make - * database thread safe for all callers. - */ - void commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ); - - /** - * Get and return the current "head" node. - * - * Head is determined by longest chain. Oldest - * chain wins in a tie of length. Only finalized - * nodes are eligible to become head. - */ - state_node_ptr get_head( const shared_lock_ptr& lock ) const; - - /** - * Get and return the current "head" node. - * - * Head is determined by longest chain. Oldest - * chain wins in a tie of length. Only finalized - * nodes are eligible to become head. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_head( const unique_lock_ptr& lock ) const; - - /** - * Get and return a vector of all fork heads. - * - * Fork heads are any finalized nodes that do - * not have finalized children. - */ - std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; - - /** - * Get and return a vector of all fork heads. - * - * Fork heads are any finalized nodes that do - * not have finalized children. - * - * WARNING: The state nodes returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; - - /** - * Get and return a vector of all nodes. - */ - std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; - - /** - * Get and return a vector of all nodes. - * - * WARNING: The state nodes returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. 
Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; - - /** - * Get and return the current "root" node. - * - * All state nodes are guaranteed to a descendant of root. - */ - state_node_ptr get_root( const shared_lock_ptr& lock ) const; - - /** - * Get and return the current "root" node. - * - * All state nodes are guaranteed to a descendant of root. - * - * WARNING: The state node returned does not have an internal lock. The caller - * must be careful to ensure internal consistency. Best practice is to not - * share this node with a parallel thread and to reset it before releasing the - * unique lock. - */ - state_node_ptr get_root( const unique_lock_ptr& lock ) const; - - private: - std::unique_ptr< detail::database_impl > impl; +public: + database(); + ~database(); + + shared_lock_ptr get_shared_lock() const; + + unique_lock_ptr get_unique_lock() const; + + /** + * Open the database. + */ + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ); + + /** + * Open the database. + */ + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ); + + /** + * Close the database. + */ + void close( const unique_lock_ptr& lock ); + + /** + * Reset the database. + */ + void reset( const unique_lock_ptr& lock ); + + /** + * Get an ancestor of a node at a particular revision + */ + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const; + state_node_ptr get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const; + + /** + * Get an ancestor of a node at a particular revision + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const; + state_node_ptr get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const; + + /** + * Get the state_node for the given state_node_id. + * + * Return an empty pointer if no node for the given id exists. + */ + state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; + + /** + * Get the state_node for the given state_node_id. + * + * Return an empty pointer if no node for the given id exists. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; + + /** + * Create a writable state_node. + * + * - If parent_id refers to a writable node, fail. + * - Otherwise, return a new writable node. + * - Writing to the returned node will not modify the parent node. + * + * If the parent is subsequently discarded, database preserves + * as much of the parent's state storage as necessary to continue + * to serve queries on any (non-discarded) children. 
A discarded + * parent node's state may internally be merged into a child's + * state storage area, allowing the parent's state storage area + * to be freed. This merge may occur immediately, or it may be + * deferred or parallelized. + */ + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + + /** + * Create a writable state_node. + * + * - If parent_id refers to a writable node, fail. + * - Otherwise, return a new writable node. + * - Writing to the returned node will not modify the parent node. + * + * If the parent is subsequently discarded, database preserves + * as much of the parent's state storage as necessary to continue + * to serve queries on any (non-discarded) children. A discarded + * parent node's state may internally be merged into a child's + * state storage area, allowing the parent's state storage area + * to be freed. This merge may occur immediately, or it may be + * deferred or parallelized. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + + /** + * Clone a node with a new id and block header. + * + * Cannot clone a finalized node. + */ + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + + /** + * Clone a node with a new id and block header. + * + * Cannot clone a finalized node. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + + /** + * Finalize a node. The node will no longer be writable. + */ + void finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ); + + /** + * Finalize a node. The node will no longer be writable. + */ + void finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Discard the node, it can no longer be used. + * + * If the node has any children, they too will be deleted because + * there will no longer exist a path from root to those nodes. + * + * This will fail if the node you are deleting would cause the + * current head node to be delted. + */ + void discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ); + + /** + * Discard the node, it can no longer be used. + * + * If the node has any children, they too will be deleted because + * there will no longer exist a path from root to those nodes. + * + * This will fail if the node you are deleting would cause the + * current head node to be delted. + */ + void discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Squash the node in to the root state, committing it. + * Branching state between this node and its ancestor will be discarded + * and no longer accesible. 
+ * + * It is the responsiblity of the caller to ensure no readers or writers + * are accessing affected nodes by this call. + * + * TODO: Implement thread safety within commit node to make + * database thread safe for all callers. + */ + void commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ); + + /** + * Get and return the current "head" node. + * + * Head is determined by longest chain. Oldest + * chain wins in a tie of length. Only finalized + * nodes are eligible to become head. + */ + state_node_ptr get_head( const shared_lock_ptr& lock ) const; + + /** + * Get and return the current "head" node. + * + * Head is determined by longest chain. Oldest + * chain wins in a tie of length. Only finalized + * nodes are eligible to become head. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + state_node_ptr get_head( const unique_lock_ptr& lock ) const; + + /** + * Get and return a vector of all fork heads. + * + * Fork heads are any finalized nodes that do + * not have finalized children. + */ + std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; + + /** + * Get and return a vector of all fork heads. + * + * Fork heads are any finalized nodes that do + * not have finalized children. + * + * WARNING: The state nodes returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; + + /** + * Get and return a vector of all nodes. + */ + std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; + + /** + * Get and return a vector of all nodes. + * + * WARNING: The state nodes returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. + */ + std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; + + /** + * Get and return the current "root" node. + * + * All state nodes are guaranteed to a descendant of root. + */ + state_node_ptr get_root( const shared_lock_ptr& lock ) const; + + /** + * Get and return the current "root" node. + * + * All state nodes are guaranteed to a descendant of root. + * + * WARNING: The state node returned does not have an internal lock. The caller + * must be careful to ensure internal consistency. Best practice is to not + * share this node with a parallel thread and to reset it before releasing the + * unique lock. 
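A sketch of the write lifecycle these comments describe, using create_writable_node(), finalize_node() and commit_node(). Everything outside the state_db API (the helper name and its arguments) is assumed for illustration, and it presumes head exists and is finalized.

  #include <koinos/state_db/state_db.hpp>

  // Hypothetical block-application flow, not part of this patch.
  void apply_and_commit( koinos::state_db::database& db,
                         const koinos::state_db::state_node_id& block_id,
                         const koinos::protocol::block_header& header,
                         const koinos::state_db::object_space& space,
                         const koinos::state_db::object_key& key,
                         const koinos::state_db::object_value& value )
  {
    {
      auto lock = db.get_shared_lock();
      auto head = db.get_head( lock );
      auto node = db.create_writable_node( head->id(), block_id, header, lock );

      node->put_object( space, key, &value ); // overwrites any existing value
      db.finalize_node( block_id, lock );     // the node is no longer writable
    } // shared lock released before taking the unique lock

    auto lock = db.get_unique_lock();
    db.commit_node( block_id, lock ); // squash into root; discarded branches become inaccessible
  }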
+ */ + state_node_ptr get_root( const unique_lock_ptr& lock ) const; + +private: + std::unique_ptr< detail::database_impl > impl; }; -} // koinos::state_db +} // namespace koinos::state_db diff --git a/include/koinos/state_db/state_db_types.hpp b/include/koinos/state_db/state_db_types.hpp index 3911c03..57373a3 100644 --- a/include/koinos/state_db/state_db_types.hpp +++ b/include/koinos/state_db/state_db_types.hpp @@ -3,9 +3,9 @@ #include #include -#include #include #include +#include namespace koinos::state_db { @@ -49,4 +49,4 @@ KOINOS_DECLARE_DERIVED_EXCEPTION( cannot_discard, state_db_exception ); */ KOINOS_DECLARE_DERIVED_EXCEPTION( internal_error, state_db_exception ); -} // koinos::state_db +} // namespace koinos::state_db diff --git a/src/koinos/state_db/backends/backend.cpp b/src/koinos/state_db/backends/backend.cpp index 7c31839..fdd91ac 100644 --- a/src/koinos/state_db/backends/backend.cpp +++ b/src/koinos/state_db/backends/backend.cpp @@ -2,53 +2,53 @@ namespace koinos::state_db::backends { -abstract_backend::abstract_backend() : - _id( crypto::multihash::zero( crypto::multicodec::sha2_256 ) ) +abstract_backend::abstract_backend(): + _id( crypto::multihash::zero( crypto::multicodec::sha2_256 ) ) {} bool abstract_backend::empty() const { - return size() == 0; + return size() == 0; } abstract_backend::size_type abstract_backend::revision() const { - return _revision; + return _revision; } void abstract_backend::set_revision( abstract_backend::size_type revision ) { - _revision = revision; + _revision = revision; } const crypto::multihash& abstract_backend::id() const { - return _id; + return _id; } void abstract_backend::set_id( const crypto::multihash& id ) { - _id = id; + _id = id; } const crypto::multihash& abstract_backend::merkle_root() const { - return _merkle_root; + return _merkle_root; } void abstract_backend::set_merkle_root( const crypto::multihash& merkle_root ) { - _merkle_root = merkle_root; + _merkle_root = merkle_root; } const protocol::block_header& abstract_backend::block_header() const { - return _header; + return _header; } void abstract_backend::set_block_header( const protocol::block_header& header ) { - _header = header; + _header = header; } -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/src/koinos/state_db/backends/iterator.cpp b/src/koinos/state_db/backends/iterator.cpp index 68a195f..60e9ede 100644 --- a/src/koinos/state_db/backends/iterator.cpp +++ b/src/koinos/state_db/backends/iterator.cpp @@ -3,58 +3,64 @@ namespace koinos::state_db::backends { -iterator::iterator( std::unique_ptr< abstract_iterator > itr ) : _itr( std::move( itr ) ) {} +iterator::iterator( std::unique_ptr< abstract_iterator > itr ): + _itr( std::move( itr ) ) +{} -iterator::iterator( const iterator& other ) : _itr( other._itr->copy() ) {} +iterator::iterator( const iterator& other ): + _itr( other._itr->copy() ) +{} -iterator::iterator( iterator&& other ) : _itr( std::move( other._itr ) ) {} +iterator::iterator( iterator&& other ): + _itr( std::move( other._itr ) ) +{} const iterator::value_type& iterator::operator*() const { - return **_itr; + return **_itr; } const iterator::key_type& iterator::key() const { - return _itr->key(); + return _itr->key(); } iterator& iterator::operator++() { - ++(*_itr); - return *this; + ++( *_itr ); + return *this; } iterator& iterator::operator--() { - --(*_itr); - return *this; + --( *_itr ); + return *this; } iterator& iterator::operator=( iterator&& other ) { - _itr = std::move( other._itr ); - return 
*this; + _itr = std::move( other._itr ); + return *this; } bool iterator::valid() const { - return _itr && _itr->valid(); + return _itr && _itr->valid(); } bool operator==( const iterator& x, const iterator& y ) { - if ( x.valid() && y.valid() ) - { - return *x == *y; - } + if( x.valid() && y.valid() ) + { + return *x == *y; + } - return x.valid() == y.valid(); + return x.valid() == y.valid(); } bool operator!=( const iterator& x, const iterator& y ) { - return !( x == y ); + return !( x == y ); } -} // koinos::state_db::backends +} // namespace koinos::state_db::backends diff --git a/src/koinos/state_db/backends/map/map_backend.cpp b/src/koinos/state_db/backends/map/map_backend.cpp index 32b0dab..580f470 100644 --- a/src/koinos/state_db/backends/map/map_backend.cpp +++ b/src/koinos/state_db/backends/map/map_backend.cpp @@ -8,53 +8,58 @@ map_backend::~map_backend() {} iterator map_backend::begin() noexcept { - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.begin() ), _map ) ); + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.begin() ), _map ) ); } iterator map_backend::end() noexcept { - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.end() ), _map ) ); + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.end() ), _map ) ); } void map_backend::put( const key_type& k, const value_type& v ) { - _map.insert_or_assign( k, v ); + _map.insert_or_assign( k, v ); } const map_backend::value_type* map_backend::get( const key_type& key ) const { - auto itr = _map.find( key ); - if ( itr == _map.end() ) - { - return nullptr; - } + auto itr = _map.find( key ); + if( itr == _map.end() ) + { + return nullptr; + } - return &itr->second; + return &itr->second; } void map_backend::erase( const key_type& k ) { - _map.erase( k ); + _map.erase( k ); } void map_backend::clear() noexcept { - _map.clear(); + _map.clear(); } map_backend::size_type map_backend::size() const noexcept { - return _map.size(); + return _map.size(); } iterator map_backend::find( const key_type& k ) { - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.find( k ) ), _map ) ); + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.find( k ) ), _map ) ); } iterator map_backend::lower_bound( const key_type& k ) { - return iterator( std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.lower_bound( k ) ), _map ) ); + return iterator( + std::make_unique< map_iterator >( std::make_unique< map_iterator::iterator_impl >( _map.lower_bound( k ) ), + _map ) ); } void map_backend::start_write_batch() {} @@ -65,7 +70,7 @@ void map_backend::store_metadata() {} std::shared_ptr< abstract_backend > map_backend::clone() const { - return std::make_shared< map_backend >( *this ); + return std::make_shared< map_backend >( *this ); } -} // koinos::state_db::backends::map +} // namespace koinos::state_db::backends::map diff --git a/src/koinos/state_db/backends/map/map_iterator.cpp b/src/koinos/state_db/backends/map/map_iterator.cpp index 6d37e73..5b84960 100644 --- a/src/koinos/state_db/backends/map/map_iterator.cpp +++ b/src/koinos/state_db/backends/map/map_iterator.cpp @@ -5,47 +5,49 @@ namespace koinos::state_db::backends::map { map_iterator::map_iterator( std::unique_ptr< std::map< 
detail::key_type, detail::value_type >::iterator > itr, - const std::map< detail::key_type, detail::value_type >& map ) : - _itr( std::move( itr ) ), - _map( map ) - {} + const std::map< detail::key_type, detail::value_type >& map ): + _itr( std::move( itr ) ), + _map( map ) +{} map_iterator::~map_iterator() {} const map_iterator::value_type& map_iterator::operator*() const { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - return (*_itr)->second; + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + return ( *_itr )->second; } const map_iterator::key_type& map_iterator::key() const { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - return (*_itr)->first; + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + return ( *_itr )->first; } abstract_iterator& map_iterator::operator++() { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - ++(*_itr); - return *this; + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + ++( *_itr ); + return *this; } abstract_iterator& map_iterator::operator--() { - KOINOS_ASSERT( *_itr != _map.begin(), iterator_exception, "iterator operation is invalid" ); - --(*_itr); - return *this; + KOINOS_ASSERT( *_itr != _map.begin(), iterator_exception, "iterator operation is invalid" ); + --( *_itr ); + return *this; } bool map_iterator::valid() const { - return _itr && *_itr != _map.end(); + return _itr && *_itr != _map.end(); } std::unique_ptr< abstract_iterator > map_iterator::copy() const { - return std::make_unique< map_iterator >( std::make_unique< std::map< detail::key_type, detail::value_type >::iterator >( *_itr ), _map ); + return std::make_unique< map_iterator >( + std::make_unique< std::map< detail::key_type, detail::value_type >::iterator >( *_itr ), + _map ); } -} // koinos::state_db::backends::map +} // namespace koinos::state_db::backends::map diff --git a/src/koinos/state_db/backends/rocksdb/object_cache.cpp b/src/koinos/state_db/backends/rocksdb/object_cache.cpp index 9bde358..3573654 100644 --- a/src/koinos/state_db/backends/rocksdb/object_cache.cpp +++ b/src/koinos/state_db/backends/rocksdb/object_cache.cpp @@ -4,70 +4,73 @@ namespace koinos::state_db::backends::rocksdb { -object_cache::object_cache( std::size_t size ) : _cache_max_size( size ) {} +object_cache::object_cache( std::size_t size ): + _cache_max_size( size ) +{} object_cache::~object_cache() {} std::pair< bool, std::shared_ptr< const object_cache::value_type > > object_cache::get( const key_type& k ) { - auto itr = _object_map.find( k ); - if ( itr == _object_map.end() ) - return std::make_pair( false, std::shared_ptr< const object_cache::value_type >() ); + auto itr = _object_map.find( k ); + if( itr == _object_map.end() ) + return std::make_pair( false, std::shared_ptr< const object_cache::value_type >() ); - // Erase the entry from the list and push front - _lru_list.erase( itr->second.second ); - _lru_list.push_front( k ); - auto val = itr->second.first; + // Erase the entry from the list and push front + _lru_list.erase( itr->second.second ); + _lru_list.push_front( k ); + auto val = itr->second.first; - _object_map[ k ] = std::make_pair( val, _lru_list.begin() ); + _object_map[ k ] = std::make_pair( val, _lru_list.begin() ); - assert( _object_map.size() == _lru_list.size() ); + assert( _object_map.size() == _lru_list.size() ); - return std::make_pair( true, val ); + return std::make_pair( true, val ); 
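A small sketch of the cache contract implemented here: get() refreshes recency and put() evicts least-recently-used entries once the byte budget is exceeded. The keys, values and 64-byte budget are invented for the example, and the external locking via get_mutex() mirrors how the backend appears to use it.

  #include <memory>
  #include <mutex>
  #include <string>
  #include <koinos/state_db/backends/rocksdb/object_cache.hpp>

  // Hypothetical usage, not part of this patch.
  void cache_example()
  {
    using koinos::state_db::backends::rocksdb::object_cache;

    object_cache cache( 64 ); // budget is measured in key + value bytes

    std::lock_guard< std::mutex > guard( cache.get_mutex() ); // the exposed mutex suggests callers lock externally

    cache.put( "a", std::make_shared< const std::string >( "alpha" ) );
    cache.put( "b", std::make_shared< const std::string >( "beta" ) );

    auto [ hit, value ] = cache.get( "a" ); // moves "a" to the front of the LRU list
    // hit == true and *value == "alpha" here
  }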
} -std::shared_ptr< const object_cache::value_type > object_cache::put( const key_type& k, std::shared_ptr< const object_cache::value_type > v ) +std::shared_ptr< const object_cache::value_type > +object_cache::put( const key_type& k, std::shared_ptr< const object_cache::value_type > v ) { - remove( k ); + remove( k ); - // Min 1 byte for key and 1 byte for value - auto entry_size = std::max( k.size() + ( v ? v->size() : 0 ), std::size_t( 2 ) ); + // Min 1 byte for key and 1 byte for value + auto entry_size = std::max( k.size() + ( v ? v->size() : 0 ), std::size_t( 2 ) ); - // If the cache is full, remove the last entry from the map and pop back - while ( _cache_size + entry_size > _cache_max_size ) - remove( _lru_list.back() ); + // If the cache is full, remove the last entry from the map and pop back + while( _cache_size + entry_size > _cache_max_size ) + remove( _lru_list.back() ); - _lru_list.push_front( k ); - _object_map[ k ] = std::make_pair( v, _lru_list.begin() ); - _cache_size += entry_size; + _lru_list.push_front( k ); + _object_map[ k ] = std::make_pair( v, _lru_list.begin() ); + _cache_size += entry_size; - assert( _object_map.size() == _lru_list.size() ); + assert( _object_map.size() == _lru_list.size() ); - return v; + return v; } void object_cache::remove( const key_type& k ) { - auto itr = _object_map.find( k ); - if ( itr != _object_map.end() ) - { - _cache_size -= std::max( k.size() + ( itr->second.first ? itr->second.first->size() : 0 ), std::size_t( 2 ) ); - _lru_list.erase( itr->second.second ); - _object_map.erase( itr ); - } - - assert( _object_map.size() == _lru_list.size() ); + auto itr = _object_map.find( k ); + if( itr != _object_map.end() ) + { + _cache_size -= std::max( k.size() + ( itr->second.first ? itr->second.first->size() : 0 ), std::size_t( 2 ) ); + _lru_list.erase( itr->second.second ); + _object_map.erase( itr ); + } + + assert( _object_map.size() == _lru_list.size() ); } void object_cache::clear() { - _object_map.clear(); - _lru_list.clear(); + _object_map.clear(); + _lru_list.clear(); } std::mutex& object_cache::get_mutex() { - return _mutex; + return _mutex; } -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp index dd81d52..1c2baf8 100644 --- a/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp +++ b/src/koinos/state_db/backends/rocksdb/rocksdb_backend.cpp @@ -11,505 +11,505 @@ namespace koinos::state_db::backends::rocksdb { namespace constants { - constexpr std::size_t cache_size = 64 << 20; // 64 MB - constexpr std::size_t max_open_files = 64; - - constexpr std::size_t default_column_index = 0; - const std::string objects_column_name = "objects"; - constexpr std::size_t objects_column_index = 1; - const std::string metadata_column_name = "metadata"; - constexpr std::size_t metadata_column_index = 2; - - const std::string size_key = "size"; - const std::string revision_key = "revision"; - const std::string id_key = "id"; - const std::string merkle_root_key = "merkle_root"; - const std::string block_header_key = "block_header"; - - constexpr rocksdb_backend::size_type size_default = 0; - constexpr rocksdb_backend::size_type revision_default = 0; - const crypto::multihash id_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - const crypto::multihash merkle_root_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - const protocol::block_header 
block_header_default = protocol::block_header(); -} // constants +constexpr std::size_t cache_size = 64 << 20; // 64 MB +constexpr std::size_t max_open_files = 64; + +constexpr std::size_t default_column_index = 0; +const std::string objects_column_name = "objects"; +constexpr std::size_t objects_column_index = 1; +const std::string metadata_column_name = "metadata"; +constexpr std::size_t metadata_column_index = 2; + +const std::string size_key = "size"; +const std::string revision_key = "revision"; +const std::string id_key = "id"; +const std::string merkle_root_key = "merkle_root"; +const std::string block_header_key = "block_header"; + +constexpr rocksdb_backend::size_type size_default = 0; +constexpr rocksdb_backend::size_type revision_default = 0; +const crypto::multihash id_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); +const crypto::multihash merkle_root_default = crypto::multihash::zero( crypto::multicodec::sha2_256 ); +const protocol::block_header block_header_default = protocol::block_header(); +} // namespace constants bool setup_database( const std::filesystem::path& p ) { - std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; - defs.emplace_back( - constants::objects_column_name, - ::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::metadata_column_name, - ::rocksdb::ColumnFamilyOptions() ); - - ::rocksdb::Options options; - options.create_if_missing = true; - - ::rocksdb::DB* db; - auto status = ::rocksdb::DB::Open( options, p.string(), &db ); - - KOINOS_ASSERT( status.ok(), rocksdb_open_exception, "unable to open rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - auto db_ptr = std::shared_ptr< ::rocksdb::DB >( db ); - - std::vector< ::rocksdb::ColumnFamilyHandle* > handles; - status = db->CreateColumnFamilies( defs, &handles ); - - if ( !status.ok() ) - { - return false; - } - - std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > > handle_ptrs; - - for ( auto* h : handles ) - handle_ptrs.emplace_back( h ); - - ::rocksdb::WriteOptions wopts; - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::size_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::size_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::revision_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::revision_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::id_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::id_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::merkle_root_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::merkle_root_default ) ) - ); - - if ( !status.ok() ) - { - handle_ptrs.clear(); - db_ptr.reset(); - return false; - } - - status = db_ptr->Put( - wopts, - &*handle_ptrs[ 1 ], - ::rocksdb::Slice( constants::block_header_key ), - ::rocksdb::Slice( util::converter::as< std::string >( constants::block_header_default ) ) - ); - - handle_ptrs.clear(); - db_ptr.reset(); - - return status.ok(); + std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; + 
defs.emplace_back( constants::objects_column_name, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::metadata_column_name, ::rocksdb::ColumnFamilyOptions() ); + + ::rocksdb::Options options; + options.create_if_missing = true; + + ::rocksdb::DB* db; + auto status = ::rocksdb::DB::Open( options, p.string(), &db ); + + KOINOS_ASSERT( status.ok(), + rocksdb_open_exception, + "unable to open rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + auto db_ptr = std::shared_ptr< ::rocksdb::DB >( db ); + + std::vector< ::rocksdb::ColumnFamilyHandle* > handles; + status = db->CreateColumnFamilies( defs, &handles ); + + if( !status.ok() ) + { + return false; + } + + std::vector< std::shared_ptr< ::rocksdb::ColumnFamilyHandle > > handle_ptrs; + + for( auto* h: handles ) + handle_ptrs.emplace_back( h ); + + ::rocksdb::WriteOptions wopts; + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::size_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::size_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::revision_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::revision_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::id_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::id_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::merkle_root_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::merkle_root_default ) ) ); + + if( !status.ok() ) + { + handle_ptrs.clear(); + db_ptr.reset(); + return false; + } + + status = db_ptr->Put( wopts, + &*handle_ptrs[ 1 ], + ::rocksdb::Slice( constants::block_header_key ), + ::rocksdb::Slice( util::converter::as< std::string >( constants::block_header_default ) ) ); + + handle_ptrs.clear(); + db_ptr.reset(); + + return status.ok(); } -rocksdb_backend::rocksdb_backend() : - _cache( std::make_shared< object_cache >( constants::cache_size ) ), - _ropts( std::make_shared< ::rocksdb::ReadOptions >() ) +rocksdb_backend::rocksdb_backend(): + _cache( std::make_shared< object_cache >( constants::cache_size ) ), + _ropts( std::make_shared< ::rocksdb::ReadOptions >() ) {} rocksdb_backend::~rocksdb_backend() { - close(); + close(); } void rocksdb_backend::open( const std::filesystem::path& p ) { - KOINOS_ASSERT( p.is_absolute(), rocksdb_open_exception, "path must be absolute, ${p}", ("p", p.string()) ); - KOINOS_ASSERT( std::filesystem::exists( p ), rocksdb_open_exception, "path does not exist, ${p}", ("p", p.string()) ); - - std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; - defs.emplace_back( - ::rocksdb::kDefaultColumnFamilyName, - ::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::objects_column_name, - ::rocksdb::ColumnFamilyOptions() ); - defs.emplace_back( - constants::metadata_column_name, - ::rocksdb::ColumnFamilyOptions() ); - - std::vector< ::rocksdb::ColumnFamilyHandle* > handles; - - ::rocksdb::Options options; - options.max_open_files = constants::max_open_files; - ::rocksdb::DB* db; - - auto status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, 
&db ); - - if ( !status.ok() ) - { - KOINOS_ASSERT( setup_database( p ), rocksdb_setup_exception, "unable to configure rocksdb database" ); - - status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); - KOINOS_ASSERT( status.ok(), rocksdb_open_exception, "unable to open rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - } - - _db = std::shared_ptr< ::rocksdb::DB >( db ); - - for ( auto* h : handles ) - _handles.emplace_back( h ); - - try - { - load_metadata(); - } - catch ( ... ) - { - _handles.clear(); - _db.reset(); - throw; - } + KOINOS_ASSERT( p.is_absolute(), rocksdb_open_exception, "path must be absolute, ${p}", ( "p", p.string() ) ); + KOINOS_ASSERT( std::filesystem::exists( p ), + rocksdb_open_exception, + "path does not exist, ${p}", + ( "p", p.string() ) ); + + std::vector< ::rocksdb::ColumnFamilyDescriptor > defs; + defs.emplace_back( ::rocksdb::kDefaultColumnFamilyName, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::objects_column_name, ::rocksdb::ColumnFamilyOptions() ); + defs.emplace_back( constants::metadata_column_name, ::rocksdb::ColumnFamilyOptions() ); + + std::vector< ::rocksdb::ColumnFamilyHandle* > handles; + + ::rocksdb::Options options; + options.max_open_files = constants::max_open_files; + ::rocksdb::DB* db; + + auto status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); + + if( !status.ok() ) + { + KOINOS_ASSERT( setup_database( p ), rocksdb_setup_exception, "unable to configure rocksdb database" ); + + status = ::rocksdb::DB::Open( options, p.string(), defs, &handles, &db ); + KOINOS_ASSERT( status.ok(), + rocksdb_open_exception, + "unable to open rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + } + + _db = std::shared_ptr< ::rocksdb::DB >( db ); + + for( auto* h: handles ) + _handles.emplace_back( h ); + + try + { + load_metadata(); + } + catch( ... 
) + { + _handles.clear(); + _db.reset(); + throw; + } } void rocksdb_backend::close() { - if ( _db ) - { - store_metadata(); - flush(); - - ::rocksdb::CancelAllBackgroundWork( &*_db, true ); - _handles.clear(); - _db.reset(); - std::lock_guard lock( _cache->get_mutex() ); - _cache->clear(); - } + if( _db ) + { + store_metadata(); + flush(); + + ::rocksdb::CancelAllBackgroundWork( &*_db, true ); + _handles.clear(); + _db.reset(); + std::lock_guard lock( _cache->get_mutex() ); + _cache->clear(); + } } void rocksdb_backend::flush() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - static const ::rocksdb::FlushOptions flush_options; + static const ::rocksdb::FlushOptions flush_options; - _db->Flush( flush_options, &*_handles[ constants::objects_column_index ] ); - _db->Flush( flush_options, &*_handles[ constants::metadata_column_index ] ); + _db->Flush( flush_options, &*_handles[ constants::objects_column_index ] ); + _db->Flush( flush_options, &*_handles[ constants::metadata_column_index ] ); } void rocksdb_backend::start_write_batch() { - KOINOS_ASSERT( !_write_batch, rocksdb_session_in_progress, "session already in progress" ); - _write_batch.emplace(); + KOINOS_ASSERT( !_write_batch, rocksdb_session_in_progress, "session already in progress" ); + _write_batch.emplace(); } void rocksdb_backend::end_write_batch() { - if ( _write_batch ) - { - auto status = _db->Write( _wopts, &*_write_batch ); - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write session to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - _write_batch.reset(); - } + if( _write_batch ) + { + auto status = _db->Write( _wopts, &*_write_batch ); + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write session to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + _write_batch.reset(); + } } iterator rocksdb_backend::begin() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - itr->_iter->SeekToFirst(); + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + itr->_iter->SeekToFirst(); - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); } iterator rocksdb_backend::end() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); } void rocksdb_backend::put( const key_type& k, const value_type& v ) { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - bool exists = get( k ); - - ::rocksdb::Status status; - - if ( _write_batch ) - { - status = _write_batch->Put( - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - ::rocksdb::Slice( v ) ); - } - else - { - status = _db->Put( - _wopts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - ::rocksdb::Slice( v ) ); - } - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - if ( !exists ) - { - _size++; - } - - std::lock_guard lock( _cache->get_mutex() ); - _cache->put( k, std::make_shared< const object_cache::value_type >( v ) ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + bool exists = get( k ); + + ::rocksdb::Status status; + + if( _write_batch ) + { + status = + _write_batch->Put( &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), ::rocksdb::Slice( v ) ); + } + else + { + status = + _db->Put( _wopts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), ::rocksdb::Slice( v ) ); + } + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + if( !exists ) + { + _size++; + } + + std::lock_guard lock( _cache->get_mutex() ); + _cache->put( k, std::make_shared< const object_cache::value_type >( v ) ); } const rocksdb_backend::value_type* rocksdb_backend::get( const key_type& k ) const { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - std::lock_guard lock( _cache->get_mutex() ); - auto [cache_hit, ptr] = _cache->get( k ); - if ( cache_hit ) - { - if ( ptr ) - return &*ptr; + std::lock_guard lock( _cache->get_mutex() ); + auto [ cache_hit, ptr ] = _cache->get( k ); + if( cache_hit ) + { + if( ptr ) + return &*ptr; - return nullptr; - } + return nullptr; + } + value_type value; + auto status = _db->Get( *_ropts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ), &value ); - value_type value; - auto status = _db->Get( - *_ropts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ), - &value - ); + if( status.ok() ) + return &*_cache->put( k, std::make_shared< const object_cache::value_type >( value ) ); + else if( status.IsNotFound() ) + _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); - if ( status.ok() ) - return &*_cache->put( k, std::make_shared< const object_cache::value_type >( value ) ); - else if ( status.IsNotFound() ) - _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); - - return nullptr; + return nullptr; } void rocksdb_backend::erase( const key_type& k ) { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - bool exists = get( k ); - auto status = _db->Delete( - _wopts, - &*_handles[ constants::objects_column_index ], - ::rocksdb::Slice( k ) ); + bool exists = get( k ); + auto status = _db->Delete( _wopts, &*_handles[ constants::objects_column_index ], ::rocksdb::Slice( k ) ); - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - if ( exists ) - { - _size--; - } + if( exists ) + { + _size--; + } - std::lock_guard lock( _cache->get_mutex() ); - _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); + std::lock_guard lock( _cache->get_mutex() ); + _cache->put( k, std::shared_ptr< const object_cache::value_type >() ); } void rocksdb_backend::clear() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - for ( auto h : _handles ) - { - _db->DropColumnFamily( &*h ); - } + for( auto h: _handles ) + { + _db->DropColumnFamily( &*h ); + } - _handles.clear(); - _db.reset(); - std::lock_guard lock( _cache->get_mutex() ); - _cache->clear(); + _handles.clear(); + _db.reset(); + std::lock_guard lock( _cache->get_mutex() ); + _cache->clear(); } rocksdb_backend::size_type rocksdb_backend::size() const { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - return _size; + return _size; } iterator rocksdb_backend::find( const key_type& k ) { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - auto itr_ptr = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + auto itr_ptr = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - itr_ptr->Seek( ::rocksdb::Slice( k ) ); + itr_ptr->Seek( ::rocksdb::Slice( k ) ); - if ( itr_ptr->Valid() ) - { - auto key_slice = itr_ptr->key(); + if( itr_ptr->Valid() ) + { + auto key_slice = itr_ptr->key(); - if ( k.size() == key_slice.size() - && memcmp( k.data(), key_slice.data(), k.size() ) == 0 ) - { - itr->_iter = std::move( itr_ptr ); - } - } + if( k.size() == key_slice.size() && memcmp( k.data(), key_slice.data(), k.size() ) == 0 ) + { + itr->_iter = std::move( itr_ptr ); + } + } - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); } iterator rocksdb_backend::lower_bound( const key_type& k ) { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); - itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); + auto itr = std::make_unique< rocksdb_iterator >( _db, _handles[ constants::objects_column_index ], _ropts, _cache ); + itr->_iter = std::unique_ptr< ::rocksdb::Iterator >( + _db->NewIterator( *_ropts, &*_handles[ constants::objects_column_index ] ) ); - itr->_iter->Seek( ::rocksdb::Slice( k ) ); + itr->_iter->Seek( ::rocksdb::Slice( k ) ); - return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); + return iterator( std::unique_ptr< abstract_iterator >( std::move( itr ) ) ); } void 
rocksdb_backend::load_metadata() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - std::string value; - auto status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::size_key ), - &value ); + std::string value; + auto status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::size_key ), + &value ); - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - _size = util::converter::to< size_type >( value ); + _size = util::converter::to< size_type >( value ); - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::revision_key ), - &value ); + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::revision_key ), + &value ); - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - set_revision( util::converter::to< size_type >( value ) ); + set_revision( util::converter::to< size_type >( value ) ); - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::id_key ), - &value ); + status = + _db->Get( *_ropts, &*_handles[ constants::metadata_column_index ], ::rocksdb::Slice( constants::id_key ), &value ); - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - set_id( util::converter::to< crypto::multihash >( value ) ); + set_id( util::converter::to< crypto::multihash >( value ) ); - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::merkle_root_key ), - &value ); + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::merkle_root_key ), + &value ); - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); - set_merkle_root( util::converter::to< crypto::multihash >( value ) ); + set_merkle_root( util::converter::to< crypto::multihash >( value ) ); - status = _db->Get( - *_ropts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::block_header_key ), - &value ); + status = _db->Get( *_ropts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::block_header_key ), + &value ); - KOINOS_ASSERT( status.ok(), rocksdb_read_exception, "unable to read from rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( status.ok(), + rocksdb_read_exception, + "unable to read from rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - set_block_header( util::converter::to< protocol::block_header >( value ) ); + set_block_header( util::converter::to< protocol::block_header >( value ) ); } void rocksdb_backend::store_metadata() { - KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); - - auto status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::size_key ), - ::rocksdb::Slice( util::converter::as< std::string >( _size ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::revision_key ), - ::rocksdb::Slice( util::converter::as< std::string >( revision() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::id_key ), - ::rocksdb::Slice( util::converter::as< std::string >( id() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::merkle_root_key ), - ::rocksdb::Slice( util::converter::as< std::string >( merkle_root() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); - - status = _db->Put( - _wopts, - &*_handles[ constants::metadata_column_index ], - ::rocksdb::Slice( constants::block_header_key ), - ::rocksdb::Slice( util::converter::as< std::string >( block_header() ) ) - ); - - KOINOS_ASSERT( status.ok(), rocksdb_write_exception, "unable to write to rocksdb database" + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + KOINOS_ASSERT( _db, rocksdb_database_not_open_exception, "database not open" ); + + auto status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::size_key ), + ::rocksdb::Slice( util::converter::as< std::string >( _size ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? 
", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::revision_key ), + ::rocksdb::Slice( util::converter::as< std::string >( revision() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::id_key ), + ::rocksdb::Slice( util::converter::as< std::string >( id() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::merkle_root_key ), + ::rocksdb::Slice( util::converter::as< std::string >( merkle_root() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); + + status = _db->Put( _wopts, + &*_handles[ constants::metadata_column_index ], + ::rocksdb::Slice( constants::block_header_key ), + ::rocksdb::Slice( util::converter::as< std::string >( block_header() ) ) ); + + KOINOS_ASSERT( status.ok(), + rocksdb_write_exception, + "unable to write to rocksdb database" + + ( status.getState() ? ", " + std::string( status.getState() ) : "" ) ); } std::shared_ptr< abstract_backend > rocksdb_backend::clone() const { - KOINOS_THROW( internal_exception, "rocksdb_backend, 'clone' not implemented" ); + KOINOS_THROW( internal_exception, "rocksdb_backend, 'clone' not implemented" ); } -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp b/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp index ce0351c..bb6412e 100644 --- a/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp +++ b/src/koinos/state_db/backends/rocksdb/rocksdb_iterator.cpp @@ -4,128 +4,127 @@ namespace koinos::state_db::backends::rocksdb { -rocksdb_iterator::rocksdb_iterator( - std::shared_ptr< ::rocksdb::DB > db, - std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, - std::shared_ptr< const ::rocksdb::ReadOptions > opts, - std::shared_ptr< object_cache > cache -) : - _db( db ), - _handle( handle ), - _opts( opts ), - _cache( cache ) +rocksdb_iterator::rocksdb_iterator( std::shared_ptr< ::rocksdb::DB > db, + std::shared_ptr< ::rocksdb::ColumnFamilyHandle > handle, + std::shared_ptr< const ::rocksdb::ReadOptions > opts, + std::shared_ptr< object_cache > cache ): + _db( db ), + _handle( handle ), + _opts( opts ), + _cache( cache ) {} -rocksdb_iterator::rocksdb_iterator( const rocksdb_iterator& other ) : - _db( other._db ), - _handle( other._handle ), - _opts( other._opts ), - _cache( other._cache ), - _cache_value( other._cache_value ) +rocksdb_iterator::rocksdb_iterator( const rocksdb_iterator& other ): + _db( other._db ), + _handle( other._handle ), + _opts( other._opts ), + _cache( other._cache ), + _cache_value( other._cache_value ) { - if ( other._iter ) - { - _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); - - if( other._iter->Valid() ) - { - _iter->Seek( other._iter->key() ); - } - } + if( other._iter ) + { + _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); + + if( other._iter->Valid() ) + { + 
_iter->Seek( other._iter->key() ); + } + } } rocksdb_iterator::~rocksdb_iterator() {} const rocksdb_iterator::value_type& rocksdb_iterator::operator*() const { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - if ( !_cache_value ) - { - update_cache_value(); - } + if( !_cache_value ) + { + update_cache_value(); + } - return *_cache_value; + return *_cache_value; } const rocksdb_iterator::key_type& rocksdb_iterator::key() const { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - if ( !_key ) - { - update_cache_value(); - } + if( !_key ) + { + update_cache_value(); + } - return *_key; + return *_key; } abstract_iterator& rocksdb_iterator::operator++() { - KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); + KOINOS_ASSERT( valid(), iterator_exception, "iterator operation is invalid" ); - _iter->Next(); - KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); + _iter->Next(); + KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); - update_cache_value(); + update_cache_value(); - return *this; + return *this; } abstract_iterator& rocksdb_iterator::operator--() { - if ( !valid() ) - { - _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); - _iter->SeekToLast(); - } - else - { - _iter->Prev(); - KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); - } - - update_cache_value(); - - return *this; + if( !valid() ) + { + _iter.reset( _db->NewIterator( *_opts, &*_handle ) ); + _iter->SeekToLast(); + } + else + { + _iter->Prev(); + KOINOS_ASSERT( _iter->status().ok(), iterator_exception, "iterator operation is invalid" ); + } + + update_cache_value(); + + return *this; } bool rocksdb_iterator::valid() const { - return _iter && _iter->Valid(); + return _iter && _iter->Valid(); } std::unique_ptr< abstract_iterator > rocksdb_iterator::copy() const { - return std::make_unique< rocksdb_iterator >( *this ); + return std::make_unique< rocksdb_iterator >( *this ); } void rocksdb_iterator::update_cache_value() const { - if ( valid() ) - { - auto key_slice = _iter->key(); - auto key = std::make_shared< std::string >( key_slice.data(), key_slice.size() ); - std::lock_guard< std::mutex > lock( _cache->get_mutex() ); - auto [cache_hit, ptr] = _cache->get( *key ); - - if ( cache_hit ) - KOINOS_ASSERT( ptr, rocksdb_internal_exception, "iterator erroneously hit null value in cache" ); - - if ( !ptr ) - { - auto value_slice = _iter->value(); - ptr = _cache->put( *key, std::make_shared< const object_cache::value_type >( value_slice.data(), value_slice.size() ) ); - } - - _cache_value = ptr; - _key = key; - } - else - { - _cache_value.reset(); - _key.reset(); - } + if( valid() ) + { + auto key_slice = _iter->key(); + auto key = std::make_shared< std::string >( key_slice.data(), key_slice.size() ); + std::lock_guard< std::mutex > lock( _cache->get_mutex() ); + auto [ cache_hit, ptr ] = _cache->get( *key ); + + if( cache_hit ) + KOINOS_ASSERT( ptr, rocksdb_internal_exception, "iterator erroneously hit null value in cache" ); + + if( !ptr ) + { + auto value_slice = _iter->value(); + ptr = _cache->put( *key, + std::make_shared< const object_cache::value_type >( value_slice.data(), value_slice.size() ) ); + } + + _cache_value = ptr; + _key = key; + } + else + { 
+ _cache_value.reset(); + _key.reset(); + } } -} // koinos::state_db::backends::rocksdb +} // namespace koinos::state_db::backends::rocksdb diff --git a/src/koinos/state_db/merge_iterator.cpp b/src/koinos/state_db/merge_iterator.cpp index cfcd5ef..8b7e734 100644 --- a/src/koinos/state_db/merge_iterator.cpp +++ b/src/koinos/state_db/merge_iterator.cpp @@ -2,261 +2,310 @@ namespace koinos::state_db::detail { -iterator_wrapper::iterator_wrapper( backends::iterator&& i, uint64_t r, std::shared_ptr< backends::abstract_backend > b ) : - itr( std::move( i ) ), - revision( r ), - backend( b ) +iterator_wrapper::iterator_wrapper( backends::iterator&& i, + uint64_t r, + std::shared_ptr< backends::abstract_backend > b ): + itr( std::move( i ) ), + revision( r ), + backend( b ) {} -iterator_wrapper::iterator_wrapper( iterator_wrapper&& i ) : - itr( std::move( i.itr ) ), - revision( i.revision ), - backend( i.backend ) +iterator_wrapper::iterator_wrapper( iterator_wrapper&& i ): + itr( std::move( i.itr ) ), + revision( i.revision ), + backend( i.backend ) {} -iterator_wrapper::iterator_wrapper( const iterator_wrapper& i ) : - itr( i.itr ), - revision( i.revision ), - backend( i.backend ) +iterator_wrapper::iterator_wrapper( const iterator_wrapper& i ): + itr( i.itr ), + revision( i.revision ), + backend( i.backend ) {} const iterator_wrapper& iterator_wrapper::self() const { - return *this; + return *this; } bool iterator_wrapper::valid() const { - return itr != backend->end(); + return itr != backend->end(); } bool iterator_compare_less::operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const { - bool lh_valid = lhs.valid(); - bool rh_valid = rhs.valid(); + bool lh_valid = lhs.valid(); + bool rh_valid = rhs.valid(); - if ( !lh_valid && !rh_valid ) return lhs.revision > rhs.revision; - if ( !lh_valid ) return false; - if ( !rh_valid ) return true; + if( !lh_valid && !rh_valid ) + return lhs.revision > rhs.revision; + if( !lh_valid ) + return false; + if( !rh_valid ) + return true; - return lhs.itr.key() < rhs.itr.key(); + return lhs.itr.key() < rhs.itr.key(); } bool iterator_compare_greater::operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const { - bool lh_valid = lhs.valid(); - bool rh_valid = rhs.valid(); + bool lh_valid = lhs.valid(); + bool rh_valid = rhs.valid(); - if ( !lh_valid && !rh_valid ) return lhs.revision > rhs.revision; - if ( !lh_valid ) return false; - if ( !rh_valid ) return true; + if( !lh_valid && !rh_valid ) + return lhs.revision > rhs.revision; + if( !lh_valid ) + return false; + if( !rh_valid ) + return true; - return rhs.itr.key() < lhs.itr.key(); + return rhs.itr.key() < lhs.itr.key(); } -merge_iterator::merge_iterator( const merge_iterator& other ) : - _itr_revision_index( other._itr_revision_index ), - _delta_deque( other._delta_deque ) +merge_iterator::merge_iterator( const merge_iterator& other ): + _itr_revision_index( other._itr_revision_index ), + _delta_deque( other._delta_deque ) {} bool merge_iterator::operator==( const merge_iterator& other ) const { - // If both iterators are empty, they are true. - // But we use empty merge iterators as an optimization for an end itertor. - // So if one is empty, and the other is all end iterators, they are also equal. 
- if ( _itr_revision_index.size() == 0 && other._itr_revision_index.size() == 0 ) return true;
- else if ( _itr_revision_index.size() == 0 ) return other.is_end();
- else if ( other._itr_revision_index.size() == 0 ) return is_end();
-
- auto my_begin = _itr_revision_index.begin();
- auto other_begin = other._itr_revision_index.begin();
-
- if ( !my_begin->valid() && !other_begin->valid() ) return true;
- if ( !my_begin->valid() || !other_begin->valid() ) return false;
- if ( my_begin->revision != other_begin->revision ) return false;
-
- return my_begin->itr == other_begin->itr;
+ // If both iterators are empty, they are equal.
+ // But we use empty merge iterators as an optimization for an end iterator.
+ // So if one is empty, and the other is all end iterators, they are also equal.
+ if( _itr_revision_index.size() == 0 && other._itr_revision_index.size() == 0 )
+ return true;
+ else if( _itr_revision_index.size() == 0 )
+ return other.is_end();
+ else if( other._itr_revision_index.size() == 0 )
+ return is_end();
+
+ auto my_begin = _itr_revision_index.begin();
+ auto other_begin = other._itr_revision_index.begin();
+
+ if( !my_begin->valid() && !other_begin->valid() )
+ return true;
+ if( !my_begin->valid() || !other_begin->valid() )
+ return false;
+ if( my_begin->revision != other_begin->revision )
+ return false;
+
+ return my_begin->itr == other_begin->itr;
}

merge_iterator& merge_iterator::operator++()
{
- auto first_itr = _itr_revision_index.begin();
- KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" );
+ auto first_itr = _itr_revision_index.begin();
+ KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" );

- _itr_revision_index.modify( first_itr, []( iterator_wrapper& i ){ ++(i.itr); } );
- resolve_conflicts();
+ _itr_revision_index.modify( first_itr,
+ []( iterator_wrapper& i )
+ {
+ ++( i.itr );
+ } );
+ resolve_conflicts();

- return *this;
+ return *this;
}

merge_iterator& merge_iterator::operator--()
{
- const auto& order_idx = _itr_revision_index.template get< by_order_revision >();
-
- auto head_itr = order_idx.begin();
- std::optional< key_type > head_key;
-
- if( head_itr->valid() )
- {
- head_key = head_itr->itr.key();
- }
-
- /* We are grabbing the current head value.
- * Then iterate over all other iterators and rewind them until they have a value less
- * than the current value. One of those values is what we want to decrement to.
- */
- const auto& rev_idx = _itr_revision_index.template get< by_revision >();
- for( auto rev_itr = rev_idx.begin(); rev_itr != rev_idx.end(); ++rev_itr )
- {
- // Only decrement iterators that have modified objects
- if( rev_itr->backend->size() )
+ const auto& order_idx = _itr_revision_index.template get< by_order_revision >();
+
+ auto head_itr = order_idx.begin();
+ std::optional< key_type > head_key;
+
+ if( head_itr->valid() )
+ {
+ head_key = head_itr->itr.key();
+ }
+
+ /* We are grabbing the current head value.
+ * Then iterate over all other iterators and rewind them until they have a value less
+ * than the current value. One of those values is what we want to decrement to.
+ */
+ const auto& rev_idx = _itr_revision_index.template get< by_revision >();
+ for( auto rev_itr = rev_idx.begin(); rev_itr != rev_idx.end(); ++rev_itr )
+ {
+ // Only decrement iterators that have modified objects
+ if( rev_itr->backend->size() )
+ {
+ auto begin = rev_itr->backend->begin();
+
+ if( !head_key )
{
- auto begin = rev_itr->backend->begin();
-
- if( !head_key )
- {
- // If there was no valid key, then bring back each iterator once, it is gauranteed to be less than the
- // current value (end()).
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } );
- }
- else
- {
- // Do an initial decrement if the iterator currently points to end()
- if( !rev_itr->valid() )
- {
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } );
- }
-
- // Decrement back to the first key that is less than the head key
- while( rev_itr->itr.key() >= *head_key && rev_itr->itr != begin )
- {
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ), [&]( iterator_wrapper& i ){ --(i.itr); } );
- }
- }
-
- // The key at this point is guaranteed to be less than the head key (or at begin() and greator), but it
- // might have been modified in a later index. We need to continue decrementing until we have a valid key.
- bool dirty = true;
-
- while( dirty && rev_itr->valid() && rev_itr->itr != begin )
- {
- dirty = is_dirty( rev_itr );
-
- if( dirty )
- {
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *(rev_itr) ), [](iterator_wrapper& i ){ --(i.itr); } );
- }
- }
+ // If there was no valid key, then bring back each iterator once, it is guaranteed to be less than the
+ // current value (end()).
+ _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ),
+ [ & ]( iterator_wrapper& i )
+ {
+ --( i.itr );
+ } );
}
- }
-
- const auto& rev_order_idx = _itr_revision_index.template get< by_reverse_order_revision >();
- auto least_itr = rev_order_idx.begin();
-
- if( _delta_deque.size() > 1 )
- {
- // This next bit works in two modes.
- // Some indices may not have had a value less than the previous head, so they will show up first,
- // we need to increment through those values until we get the the new valid least value.
- if( head_key )
+ else
{
- while( least_itr != rev_order_idx.end() && least_itr->valid()
- && ( is_dirty( least_itr ) || least_itr->itr.key() >= *head_key ) )
- {
- ++least_itr;
- }
+ // Do an initial decrement if the iterator currently points to end()
+ if( !rev_itr->valid() )
+ {
+ _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ),
+ [ & ]( iterator_wrapper& i )
+ {
+ --( i.itr );
+ } );
+ }
+
+ // Decrement back to the first key that is less than the head key
+ while( rev_itr->itr.key() >= *head_key && rev_itr->itr != begin )
+ {
+ _itr_revision_index.modify( _itr_revision_index.iterator_to( *rev_itr ),
+ [ & ]( iterator_wrapper& i )
+ {
+ --( i.itr );
+ } );
+ }
}
- // Now least_itr points to the new least value, unless it is end()
- if( least_itr != rev_order_idx.end() )
+ // The key at this point is guaranteed to be less than the head key (or at begin() and greater), but it
+ // might have been modified in a later index. We need to continue decrementing until we have a valid key.
+ bool dirty = true;
+
+ while( dirty && rev_itr->valid() && rev_itr->itr != begin )
{
- ++least_itr;
+ dirty = is_dirty( rev_itr );
+
+ if( dirty )
+ {
+ _itr_revision_index.modify( _itr_revision_index.iterator_to( *( rev_itr ) ),
+ []( iterator_wrapper& i )
+ {
+ --( i.itr );
+ } );
+ }
}
-
- // Now least_itr points to the next value. All of these are too much less, but are guaranteed to be valid.
- // All values in this indices one past are gauranteed to be greater than the new least, or invalid by
- // modification. We can increment all of them once, and then call resolve_conflicts for the new least value
- // to become the head.
- while( least_itr != rev_order_idx.end() && least_itr->valid() )
+ }
+ }
+
+ const auto& rev_order_idx = _itr_revision_index.template get< by_reverse_order_revision >();
+ auto least_itr = rev_order_idx.begin();
+
+ if( _delta_deque.size() > 1 )
+ {
+ // This next bit works in two modes.
+ // Some indices may not have had a value less than the previous head, so they will show up first,
+ // we need to increment through those values until we get to the new valid least value.
+ if( head_key )
+ {
+ while( least_itr != rev_order_idx.end() && least_itr->valid()
+ && ( is_dirty( least_itr ) || least_itr->itr.key() >= *head_key ) )
{
- _itr_revision_index.modify( _itr_revision_index.iterator_to( *(least_itr--) ), [](iterator_wrapper& i ){ ++(i.itr); } );
- ++least_itr;
+ ++least_itr;
}
-
- resolve_conflicts();
- }
-
- return *this;
+ }
+
+ // Now least_itr points to the new least value, unless it is end()
+ if( least_itr != rev_order_idx.end() )
+ {
+ ++least_itr;
+ }
+
+ // Now least_itr points to the next value. All of these are too much less, but are guaranteed to be valid.
+ // All values in these indices one past are guaranteed to be greater than the new least, or invalid by
+ // modification. We can increment all of them once, and then call resolve_conflicts for the new least value
+ // to become the head.
+ while( least_itr != rev_order_idx.end() && least_itr->valid() ) + { + _itr_revision_index.modify( _itr_revision_index.iterator_to( *( least_itr-- ) ), + []( iterator_wrapper& i ) + { + ++( i.itr ); + } ); + ++least_itr; + } + + resolve_conflicts(); + } + + return *this; } const merge_iterator::value_type& merge_iterator::operator*() const { - return _itr_revision_index.begin()->itr.operator *(); + return _itr_revision_index.begin()->itr.operator*(); } const merge_iterator::key_type& merge_iterator::key() const { - auto first_itr = _itr_revision_index.begin(); - KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" ); + auto first_itr = _itr_revision_index.begin(); + KOINOS_ASSERT( first_itr->valid(), koinos::exception, "" ); - return first_itr->itr.key(); + return first_itr->itr.key(); } void merge_iterator::resolve_conflicts() { - auto first_itr = _itr_revision_index.begin(); - bool dirty = true; - - while( dirty && first_itr->valid() ) - { - dirty = is_dirty( first_itr ); - - if( dirty ) - { - _itr_revision_index.modify( first_itr, [](iterator_wrapper& i ){ ++(i.itr); } ); - } - - first_itr = _itr_revision_index.begin(); - } + auto first_itr = _itr_revision_index.begin(); + bool dirty = true; + + while( dirty && first_itr->valid() ) + { + dirty = is_dirty( first_itr ); + + if( dirty ) + { + _itr_revision_index.modify( first_itr, + []( iterator_wrapper& i ) + { + ++( i.itr ); + } ); + } + + first_itr = _itr_revision_index.begin(); + } } bool merge_iterator::is_end() const { - return std::all_of( _itr_revision_index.begin(), _itr_revision_index.end(), - []( auto& i ){ return !i.valid(); } ); + return std::all_of( _itr_revision_index.begin(), + _itr_revision_index.end(), + []( auto& i ) + { + return !i.valid(); + } ); } -merge_state::merge_state( std::shared_ptr< state_delta > head ) : - _head( head ) +merge_state::merge_state( std::shared_ptr< state_delta > head ): + _head( head ) {} merge_iterator merge_state::begin() const { - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->begin(); - }); + return merge_iterator( _head, + [ & ]( std::shared_ptr< backends::abstract_backend > backend ) + { + return backend->begin(); + } ); } merge_iterator merge_state::end() const { - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->end(); - }); + return merge_iterator( _head, + [ & ]( std::shared_ptr< backends::abstract_backend > backend ) + { + return backend->end(); + } ); } const merge_state::value_type* merge_state::find( const key_type& key ) const { - return _head->find( key ); + return _head->find( key ); } merge_iterator merge_state::lower_bound( const key_type& key ) const { - return merge_iterator( _head, [&]( std::shared_ptr< backends::abstract_backend > backend ) - { - return backend->lower_bound( key ); - }); + return merge_iterator( _head, + [ & ]( std::shared_ptr< backends::abstract_backend > backend ) + { + return backend->lower_bound( key ); + } ); } -} // koinos::state_db::detail +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/merge_iterator.hpp b/src/koinos/state_db/merge_iterator.hpp index c2fe218..28d44ee 100644 --- a/src/koinos/state_db/merge_iterator.hpp +++ b/src/koinos/state_db/merge_iterator.hpp @@ -4,11 +4,11 @@ #include -#include #include #include #include #include +#include #include @@ -18,16 +18,16 @@ using namespace boost::multi_index; struct iterator_wrapper { - iterator_wrapper( backends::iterator&& i, uint64_t r, 
std::shared_ptr< backends::abstract_backend > b ); - iterator_wrapper( iterator_wrapper&& i ); - iterator_wrapper( const iterator_wrapper& i ); + iterator_wrapper( backends::iterator&& i, uint64_t r, std::shared_ptr< backends::abstract_backend > b ); + iterator_wrapper( iterator_wrapper&& i ); + iterator_wrapper( const iterator_wrapper& i ); - const iterator_wrapper& self() const; - bool valid() const; + const iterator_wrapper& self() const; + bool valid() const; - backends::iterator itr; - std::shared_ptr< backends::abstract_backend > backend; - uint64_t revision; + backends::iterator itr; + std::shared_ptr< backends::abstract_backend > backend; + uint64_t revision; }; // Uses revision as a tiebreaker only for when both iterators are invalid @@ -36,128 +36,119 @@ struct iterator_wrapper // (i.e. lhs < rhs == false && rhs < lhs == false ) struct iterator_compare_less { - bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; + bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; }; struct iterator_compare_greater { - bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; + bool operator()( const iterator_wrapper& lhs, const iterator_wrapper& rhs ) const; }; -class merge_iterator : - public boost::bidirectional_iterator_helper< - merge_iterator, - typename state_delta::value_type, - std::size_t, - const typename state_delta::value_type*, - const typename state_delta::value_type& > +class merge_iterator: public boost::bidirectional_iterator_helper< merge_iterator, + typename state_delta::value_type, + std::size_t, + const typename state_delta::value_type*, + const typename state_delta::value_type& > { - public: - using key_type = state_delta::key_type; - using value_type = state_delta::value_type; - - private: - using iterator_type = backends::iterator; - using state_delta_ptr = std::shared_ptr< state_delta >; - - struct by_order_revision; - struct by_reverse_order_revision; - struct by_revision; - - using iter_revision_index_type = multi_index_container< - iterator_wrapper, - indexed_by< - ordered_unique< tag< by_order_revision >, - composite_key< iterator_wrapper, - const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, - member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > - >, - composite_key_compare< iterator_compare_less, std::greater< uint64_t > > - >, - ordered_unique< tag< by_reverse_order_revision >, - composite_key< iterator_wrapper, - const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, - member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > - >, - composite_key_compare< iterator_compare_greater, std::greater< uint64_t > > - >, - ordered_unique< tag< by_revision >, member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > > - > - >; - - iter_revision_index_type _itr_revision_index; - std::deque< state_delta_ptr > _delta_deque; - - public: - template< typename Initializer > - merge_iterator( state_delta_ptr head, Initializer&& init ) - { - KOINOS_ASSERT( head, internal_error, "cannot create a merge iterator on a null delta" ); - auto current_delta = head; - - do - { - _delta_deque.push_front( current_delta ); - - _itr_revision_index.emplace( - iterator_wrapper( - std::move( init( current_delta->backend() ) ), - current_delta->revision(), - current_delta->backend() - ) - ); - - current_delta = current_delta->parent(); - } while( current_delta ); - - resolve_conflicts(); - } - - merge_iterator( const 
merge_iterator& other ); - - bool operator ==( const merge_iterator& other ) const; - - merge_iterator& operator++(); - merge_iterator& operator--(); - - const value_type& operator*() const; - - const key_type& key() const; - - private: - template< typename ItrType > - bool is_dirty( ItrType itr ) - { - bool dirty = false; - - for ( auto i = _delta_deque.size() - 1; itr->revision < _delta_deque[i]->revision() && !dirty; --i ) - { - dirty = _delta_deque[i]->is_modified( itr->itr.key() ); - } - - return dirty; - } - - void resolve_conflicts(); - bool is_end() const; +public: + using key_type = state_delta::key_type; + using value_type = state_delta::value_type; + +private: + using iterator_type = backends::iterator; + using state_delta_ptr = std::shared_ptr< state_delta >; + + struct by_order_revision; + struct by_reverse_order_revision; + struct by_revision; + + using iter_revision_index_type = multi_index_container< + iterator_wrapper, + indexed_by< + ordered_unique< + tag< by_order_revision >, + composite_key< iterator_wrapper, + const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, + member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > >, + composite_key_compare< iterator_compare_less, std::greater< uint64_t > > >, + ordered_unique< + tag< by_reverse_order_revision >, + composite_key< iterator_wrapper, + const_mem_fun< iterator_wrapper, const iterator_wrapper&, &iterator_wrapper::self >, + member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > >, + composite_key_compare< iterator_compare_greater, std::greater< uint64_t > > >, + ordered_unique< tag< by_revision >, member< iterator_wrapper, uint64_t, &iterator_wrapper::revision > > > >; + + iter_revision_index_type _itr_revision_index; + std::deque< state_delta_ptr > _delta_deque; + +public: + template< typename Initializer > + merge_iterator( state_delta_ptr head, Initializer&& init ) + { + KOINOS_ASSERT( head, internal_error, "cannot create a merge iterator on a null delta" ); + auto current_delta = head; + + do + { + _delta_deque.push_front( current_delta ); + + _itr_revision_index.emplace( iterator_wrapper( std::move( init( current_delta->backend() ) ), + current_delta->revision(), + current_delta->backend() ) ); + + current_delta = current_delta->parent(); + } + while( current_delta ); + + resolve_conflicts(); + } + + merge_iterator( const merge_iterator& other ); + + bool operator==( const merge_iterator& other ) const; + + merge_iterator& operator++(); + merge_iterator& operator--(); + + const value_type& operator*() const; + + const key_type& key() const; + +private: + template< typename ItrType > + bool is_dirty( ItrType itr ) + { + bool dirty = false; + + for( auto i = _delta_deque.size() - 1; itr->revision < _delta_deque[ i ]->revision() && !dirty; --i ) + { + dirty = _delta_deque[ i ]->is_modified( itr->itr.key() ); + } + + return dirty; + } + + void resolve_conflicts(); + bool is_end() const; }; class merge_state { - public: - using key_type = state_delta::key_type; - using value_type = state_delta::value_type; +public: + using key_type = state_delta::key_type; + using value_type = state_delta::value_type; - merge_state( std::shared_ptr< state_delta > head ); + merge_state( std::shared_ptr< state_delta > head ); - merge_iterator begin() const; - merge_iterator end() const; + merge_iterator begin() const; + merge_iterator end() const; - const value_type* find( const key_type& key ) const; - merge_iterator lower_bound( const key_type& key ) const; + const value_type* find( 
const key_type& key ) const; + merge_iterator lower_bound( const key_type& key ) const; - private: - std::shared_ptr< state_delta > _head; +private: + std::shared_ptr< state_delta > _head; }; -} // koinos::state_db::detail +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/state_db.cpp b/src/koinos/state_db/state_db.cpp index 97c5af8..cb0e545 100644 --- a/src/koinos/state_db/state_db.cpp +++ b/src/koinos/state_db/state_db.cpp @@ -16,49 +16,47 @@ #include namespace std { - template<> - struct hash< koinos::crypto::multihash > - { - std::size_t operator()( const koinos::crypto::multihash& mh ) const - { - static const std::hash< std::string > hash_fn; - return hash_fn( koinos::util::converter::as< std::string >( mh ) ); - } - }; +template<> +struct hash< koinos::crypto::multihash > +{ + std::size_t operator()( const koinos::crypto::multihash& mh ) const + { + static const std::hash< std::string > hash_fn; + return hash_fn( koinos::util::converter::as< std::string >( mh ) ); + } +}; -} +} // namespace std namespace koinos::chain { - bool operator==( const object_space& lhs, const object_space& rhs ) - { - return lhs.system() == rhs.system() - && lhs.zone() == rhs.zone() - && lhs.id() == rhs.id(); - } - - bool operator<( const object_space& lhs, const object_space& rhs ) - { - if ( lhs.system() < rhs.system() ) - { - return true; - } - else if ( lhs.system() > rhs.system() ) - { - return false; - } +bool operator==( const object_space& lhs, const object_space& rhs ) +{ + return lhs.system() == rhs.system() && lhs.zone() == rhs.zone() && lhs.id() == rhs.id(); +} - if ( lhs.zone() < rhs.zone() ) - { - return true; - } - else if ( lhs.system() > rhs.system() ) - { - return false; - } +bool operator<( const object_space& lhs, const object_space& rhs ) +{ + if( lhs.system() < rhs.system() ) + { + return true; + } + else if( lhs.system() > rhs.system() ) + { + return false; + } - return lhs.id() < rhs.id(); - } + if( lhs.zone() < rhs.zone() ) + { + return true; + } + else if( lhs.system() > rhs.system() ) + { + return false; + } + + return lhs.id() < rhs.id(); } +} // namespace koinos::chain namespace koinos::state_db { @@ -71,22 +69,17 @@ struct by_parent; using state_delta_ptr = std::shared_ptr< state_delta >; using state_multi_index_type = boost::multi_index_container< - state_delta_ptr, - boost::multi_index::indexed_by< - boost::multi_index::ordered_unique< - boost::multi_index::tag< by_id >, - boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::id > - >, - boost::multi_index::ordered_non_unique< - boost::multi_index::tag< by_parent >, - boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::parent_id > - >, - boost::multi_index::ordered_non_unique< - boost::multi_index::tag< by_revision >, - boost::multi_index::const_mem_fun< state_delta, uint64_t, &state_delta::revision > - > - > ->; + state_delta_ptr, + boost::multi_index::indexed_by< + boost::multi_index::ordered_unique< + boost::multi_index::tag< by_id >, + boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::id > >, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag< by_parent >, + boost::multi_index::const_mem_fun< state_delta, const state_node_id&, &state_delta::parent_id > >, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag< by_revision >, + boost::multi_index::const_mem_fun< state_delta, uint64_t, &state_delta::revision > > > >; const object_key null_key = object_key(); @@ -98,20 +91,23 
@@ const object_key null_key = object_key(); */ class state_node_impl final { - public: - state_node_impl() {} - ~state_node_impl() {} +public: + state_node_impl() {} + + ~state_node_impl() {} - const object_value* get_object( const object_space& space, const object_key& key ) const; - std::pair< const object_value*, const object_key > get_next_object( const object_space& space, const object_key& key ) const; - std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, const object_key& key ) const; - int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); - int64_t remove_object( const object_space& space, const object_key& key ); - crypto::multihash merkle_root() const; - std::vector< protocol::state_delta_entry > get_delta_entries() const; + const object_value* get_object( const object_space& space, const object_key& key ) const; + std::pair< const object_value*, const object_key > get_next_object( const object_space& space, + const object_key& key ) const; + std::pair< const object_value*, const object_key > get_prev_object( const object_space& space, + const object_key& key ) const; + int64_t put_object( const object_space& space, const object_key& key, const object_value* val ); + int64_t remove_object( const object_space& space, const object_key& key ); + crypto::multihash merkle_root() const; + std::vector< protocol::state_delta_entry > get_delta_entries() const; - state_delta_ptr _state; - shared_lock_ptr _lock; + state_delta_ptr _state; + shared_lock_ptr _lock; }; /** @@ -124,1266 +120,1386 @@ class state_node_impl final */ class database_impl final { - public: - database_impl() {} - ~database_impl() { close_lockless(); } - - shared_lock_ptr get_shared_lock() const; - unique_lock_ptr get_unique_lock() const; - bool verify_shared_lock( const shared_lock_ptr& lock ) const; - bool verify_unique_lock( const unique_lock_ptr& lock ) const; - - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ); - void open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ); - void open_lockless( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp ); - void close( const unique_lock_ptr& lock ); - void close_lockless(); - - void reset( const unique_lock_ptr& lock ); - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child, const shared_lock_ptr& lock ) const; - state_node_ptr get_node_at_revision( uint64_t revision, const state_node_id& child, const unique_lock_ptr& lock ) const; - state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; - state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; - state_node_ptr get_node_lockless( const state_node_id& node_id ) const; - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - state_node_ptr create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - state_node_ptr clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ); - state_node_ptr clone_node( 
const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ); - void finalize_node( const state_node_id& node, const shared_lock_ptr& lock ); - void finalize_node( const state_node_id& node, const unique_lock_ptr& lock ); - void discard_node( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist, const shared_lock_ptr& lock ); - void discard_node( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist, const unique_lock_ptr& lock ); - void discard_node_lockless( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist ); - void commit_node( const state_node_id& node, const unique_lock_ptr& lock ); - - state_node_ptr get_head( const shared_lock_ptr& lock ) const; - state_node_ptr get_head( const unique_lock_ptr& lock ) const; - state_node_ptr get_head_lockless() const; - std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; - std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; - state_node_ptr get_root( const shared_lock_ptr& lock ) const; - state_node_ptr get_root( const unique_lock_ptr& lock ) const; - state_node_ptr get_root_lockless() const; - - bool is_open() const; - - std::optional< std::filesystem::path > _path; - genesis_init_function _init_func = nullptr; - state_node_comparator_function _comp = nullptr; - - state_multi_index_type _index; - state_delta_ptr _head; - std::map< state_node_id, state_delta_ptr > _fork_heads; - state_delta_ptr _root; - - /* Regarding mutexes used for synchronizing state_db... - * - * There are three mutexes that can be locked. They are: - * - _index_mutex (locks access to _index) - * - _node_mutex (locks access to creating new state_node_ptrs) - * - state_delta::cv_mutex() (locks access to a state_delta cv) - * - * Shared locks on the _node_mutex must exist beyond the scope of calls to state_db, - * so _node_mutex must be locked first. - * - * Consequently, _index_mutex must be locked last. All functions in state_db MUST - * follow this convention or we risk deadlock. 
- */ - mutable std::timed_mutex _index_mutex; - mutable std::shared_mutex _node_mutex; - mutable std::shared_mutex _fork_heads_mutex; +public: + database_impl() {} + + ~database_impl() + { + close_lockless(); + } + + shared_lock_ptr get_shared_lock() const; + unique_lock_ptr get_unique_lock() const; + bool verify_shared_lock( const shared_lock_ptr& lock ) const; + bool verify_unique_lock( const unique_lock_ptr& lock ) const; + + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ); + void open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ); + void open_lockless( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp ); + void close( const unique_lock_ptr& lock ); + void close_lockless(); + + void reset( const unique_lock_ptr& lock ); + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child, const shared_lock_ptr& lock ) const; + state_node_ptr + get_node_at_revision( uint64_t revision, const state_node_id& child, const unique_lock_ptr& lock ) const; + state_node_ptr get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const; + state_node_ptr get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const; + state_node_ptr get_node_lockless( const state_node_id& node_id ) const; + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + state_node_ptr create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ); + state_node_ptr clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ); + void finalize_node( const state_node_id& node, const shared_lock_ptr& lock ); + void finalize_node( const state_node_id& node, const unique_lock_ptr& lock ); + void discard_node( const state_node_id& node, + const std::unordered_set< state_node_id >& whitelist, + const shared_lock_ptr& lock ); + void discard_node( const state_node_id& node, + const std::unordered_set< state_node_id >& whitelist, + const unique_lock_ptr& lock ); + void discard_node_lockless( const state_node_id& node, const std::unordered_set< state_node_id >& whitelist ); + void commit_node( const state_node_id& node, const unique_lock_ptr& lock ); + + state_node_ptr get_head( const shared_lock_ptr& lock ) const; + state_node_ptr get_head( const unique_lock_ptr& lock ) const; + state_node_ptr get_head_lockless() const; + std::vector< state_node_ptr > get_fork_heads( const shared_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_fork_heads( const unique_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_all_nodes( const shared_lock_ptr& lock ) const; + std::vector< state_node_ptr > get_all_nodes( const unique_lock_ptr& lock ) const; + state_node_ptr get_root( const shared_lock_ptr& lock ) const; + state_node_ptr get_root( const unique_lock_ptr& lock ) const; + state_node_ptr get_root_lockless() const; + + bool is_open() const; + + std::optional< 
std::filesystem::path > _path; + genesis_init_function _init_func = nullptr; + state_node_comparator_function _comp = nullptr; + + state_multi_index_type _index; + state_delta_ptr _head; + std::map< state_node_id, state_delta_ptr > _fork_heads; + state_delta_ptr _root; + + /* Regarding mutexes used for synchronizing state_db... + * + * There are three mutexes that can be locked. They are: + * - _index_mutex (locks access to _index) + * - _node_mutex (locks access to creating new state_node_ptrs) + * - state_delta::cv_mutex() (locks access to a state_delta cv) + * + * Shared locks on the _node_mutex must exist beyond the scope of calls to state_db, + * so _node_mutex must be locked first. + * + * Consequently, _index_mutex must be locked last. All functions in state_db MUST + * follow this convention or we risk deadlock. + */ + mutable std::timed_mutex _index_mutex; + mutable std::shared_mutex _node_mutex; + mutable std::shared_mutex _fork_heads_mutex; }; shared_lock_ptr database_impl::get_shared_lock() const { - return std::make_shared< std::shared_lock< std::shared_mutex > >( _node_mutex ); + return std::make_shared< std::shared_lock< std::shared_mutex > >( _node_mutex ); } unique_lock_ptr database_impl::get_unique_lock() const { - return std::make_shared< std::unique_lock< std::shared_mutex > >( _node_mutex ); + return std::make_shared< std::unique_lock< std::shared_mutex > >( _node_mutex ); } bool database_impl::verify_shared_lock( const shared_lock_ptr& lock ) const { - if ( !lock ) - return false; + if( !lock ) + return false; - if ( !lock->owns_lock() ) - return false; + if( !lock->owns_lock() ) + return false; - return lock->mutex() == &_node_mutex; + return lock->mutex() == &_node_mutex; } bool database_impl::verify_unique_lock( const unique_lock_ptr& lock ) const { - if ( !lock ) - return false; + if( !lock ) + return false; - if ( !lock->owns_lock() ) - return false; + if( !lock->owns_lock() ) + return false; - return lock->mutex() == &_node_mutex; + return lock->mutex() == &_node_mutex; } void database_impl::reset( const unique_lock_ptr& lock ) { - // - // This method closes, wipes and re-opens the database. - // - // So the caller needs to be very careful to only call this method if deleting the database is desirable! - // - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - // Wipe and start over from empty database! 
- _root->clear(); - close_lockless(); - open_lockless( _path, _init_func, _comp ); -} - -void database_impl::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - - state_node_comparator_function comp; - - switch ( algo ) - { - case fork_resolution_algorithm::block_time: - comp = &block_time_comparator; - break; - case fork_resolution_algorithm::pob: - comp = &pob_comparator; - break; - case fork_resolution_algorithm::fifo: - [[fallthrough]]; - default: - comp = &fifo_comparator; - } - - open( p, init, comp, lock ); -} - -void database_impl::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - open_lockless( p, init, comp ); -} - -void database_impl::open_lockless( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp ) -{ - auto root = std::make_shared< state_node >(); - root->_impl->_state = std::make_shared< state_delta >( p ); - _init_func = init; - _comp = comp; - - if ( !root->revision() && root->_impl->_state->is_empty() && _init_func ) - { - init( root ); - } - root->_impl->_state->finalize(); - _index.insert( root->_impl->_state ); - _root = root->_impl->_state; - _head = root->_impl->_state; - _fork_heads.insert_or_assign( _head->id(), _head ); - - _path = p; + // + // This method closes, wipes and re-opens the database. + // + // So the caller needs to be very careful to only call this method if deleting the database is desirable! + // + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + // Wipe and start over from empty database! 
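As an aside on the locking convention documented above: reset() is destructive, so the caller must hold the node mutex exclusively before the database wipes its root delta and re-opens. A minimal caller-side sketch, illustrative only and not part of this diff, written against the database_impl interface declared earlier in this file; the helper name is hypothetical and assumes access to database_impl from this translation unit.

  // Hypothetical helper, for illustration only.
  void wipe_and_reinitialize( database_impl& db )
  {
    // Per the mutex comment above, _node_mutex is acquired first through the
    // lock helpers and held for the entire call into state_db.
    auto lock = db.get_unique_lock();

    // reset() clears the root delta's backend, closes the database, and then
    // re-opens it with the same path, genesis initializer, and comparator.
    db.reset( lock );
  }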
+ _root->clear(); + close_lockless(); + open_lockless( _path, _init_func, _comp ); +} + +void database_impl::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + + state_node_comparator_function comp; + + switch( algo ) + { + case fork_resolution_algorithm::block_time: + comp = &block_time_comparator; + break; + case fork_resolution_algorithm::pob: + comp = &pob_comparator; + break; + case fork_resolution_algorithm::fifo: + [[fallthrough]]; + default: + comp = &fifo_comparator; + } + + open( p, init, comp, lock ); +} + +void database_impl::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + open_lockless( p, init, comp ); +} + +void database_impl::open_lockless( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp ) +{ + auto root = std::make_shared< state_node >(); + root->_impl->_state = std::make_shared< state_delta >( p ); + _init_func = init; + _comp = comp; + + if( !root->revision() && root->_impl->_state->is_empty() && _init_func ) + { + init( root ); + } + root->_impl->_state->finalize(); + _index.insert( root->_impl->_state ); + _root = root->_impl->_state; + _head = root->_impl->_state; + _fork_heads.insert_or_assign( _head->id(), _head ); + + _path = p; } void database_impl::close( const unique_lock_ptr& lock ) { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - close_lockless(); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + close_lockless(); } void database_impl::close_lockless() { - _fork_heads.clear(); - _root.reset(); - _head.reset(); - _index.clear(); + _fork_heads.clear(); + _root.reset(); + _head.reset(); + _index.clear(); } -state_node_ptr database_impl::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const +state_node_ptr database_impl::get_node_at_revision( uint64_t revision, + const state_node_id& child_id, + const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - KOINOS_ASSERT( revision >= _root->revision(), illegal_argument, - "cannot ask for node with revision less than root. 
root rev: ${root}, requested: ${req}", - ("root", _root->revision())("req", revision) ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( revision >= _root->revision(), + illegal_argument, + "cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", + ( "root", _root->revision() )( "req", revision ) ); - if( revision == _root->revision() ) - { - auto root = get_root_lockless(); - if ( root ) - root->_impl->_lock = lock; + if( revision == _root->revision() ) + { + auto root = get_root_lockless(); + if( root ) + root->_impl->_lock = lock; - return root; - } + return root; + } - auto child = get_node_lockless( child_id ); - if( !child ) - child = get_head_lockless(); + auto child = get_node_lockless( child_id ); + if( !child ) + child = get_head_lockless(); - state_delta_ptr delta = child->_impl->_state; + state_delta_ptr delta = child->_impl->_state; - while( delta->revision() > revision ) - { - delta = delta->parent(); - } + while( delta->revision() > revision ) + { + delta = delta->parent(); + } - auto node_itr = _index.find( delta->id() ); + auto node_itr = _index.find( delta->id() ); - KOINOS_ASSERT( node_itr != _index.end(), internal_error, - "could not find state node associated with linked state_delta ${id}", ("id", delta->id() ) ); + KOINOS_ASSERT( node_itr != _index.end(), + internal_error, + "could not find state node associated with linked state_delta ${id}", + ( "id", delta->id() ) ); - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - node->_impl->_lock = lock; - return node; + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + node->_impl->_lock = lock; + return node; } -state_node_ptr database_impl::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const +state_node_ptr database_impl::get_node_at_revision( uint64_t revision, + const state_node_id& child_id, + const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - KOINOS_ASSERT( revision >= _root->revision(), illegal_argument, - "cannot ask for node with revision less than root. root rev: ${root}, requested: ${req}", - ("root", _root->revision())("req", revision) ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( revision >= _root->revision(), + illegal_argument, + "cannot ask for node with revision less than root. 
root rev: ${root}, requested: ${req}", + ( "root", _root->revision() )( "req", revision ) ); - if( revision == _root->revision() ) - { - auto root = get_root_lockless(); + if( revision == _root->revision() ) + { + auto root = get_root_lockless(); - return root; - } + return root; + } - auto child = get_node_lockless( child_id ); - if( !child ) - child = get_head_lockless(); + auto child = get_node_lockless( child_id ); + if( !child ) + child = get_head_lockless(); - state_delta_ptr delta = child->_impl->_state; + state_delta_ptr delta = child->_impl->_state; - while( delta->revision() > revision ) - { - delta = delta->parent(); - } + while( delta->revision() > revision ) + { + delta = delta->parent(); + } - auto node_itr = _index.find( delta->id() ); + auto node_itr = _index.find( delta->id() ); - KOINOS_ASSERT( node_itr != _index.end(), internal_error, - "could not find state node associated with linked state_delta ${id}", ("id", delta->id() ) ); + KOINOS_ASSERT( node_itr != _index.end(), + internal_error, + "could not find state node associated with linked state_delta ${id}", + ( "id", delta->id() ) ); - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - return node; + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + return node; } state_node_ptr database_impl::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto node = get_node_lockless( node_id ); - if ( node ) - node->_impl->_lock = lock; + auto node = get_node_lockless( node_id ); + if( node ) + node->_impl->_lock = lock; - return node; + return node; } state_node_ptr database_impl::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto node = get_node_lockless( node_id ); + auto node = get_node_lockless( node_id ); - return node; + return node; } state_node_ptr database_impl::get_node_lockless( const state_node_id& node_id ) const { - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node_itr = _index.find( node_id ); + auto node_itr = _index.find( node_id ); - if ( node_itr != _index.end() ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = *node_itr; - return node; - } + if( node_itr != _index.end() ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = *node_itr; + return node; + } - return state_node_ptr(); + return state_node_ptr(); } -state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) +state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) { - 
KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" );; + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + ; + + // Needs to be configurable + auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); - // Needs to be configurable - auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); + state_node_ptr parent_state = get_node( parent_id, lock ); - state_node_ptr parent_state = get_node( parent_id, lock ); + if( parent_state ) + { + std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); - if ( parent_state ) - { - std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); + // We need to own the lock + if( cv_lock.owns_lock() ) + { + // Check if the node is finalized + bool is_finalized = parent_state->is_finalized(); - // We need to own the lock - if ( cv_lock.owns_lock() ) + // If the node is finalized, try to wait for the node to be finalized + if( !is_finalized + && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) + is_finalized = parent_state->is_finalized(); + + // Finally, if the node is finalized, we can create a new writable node with the desired parent + if( is_finalized ) { - // Check if the node is finalized - bool is_finalized = parent_state->is_finalized(); - - // If the node is finalized, try to wait for the node to be finalized - if ( !is_finalized && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) - is_finalized = parent_state->is_finalized(); - - // Finally, if the node is finalized, we can create a new writable node with the desired parent - if ( is_finalized ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); - - std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); - - // Ensure the parent node still exists in the index and then insert the child node - if ( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() && _index.insert( node->_impl->_state ).second ) - { - node->_impl->_lock = lock; - return node; - } - } + auto node = std::make_shared< state_node >(); + node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); + + std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); + + // Ensure the parent node still exists in the index and then insert the child node + if( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() + && _index.insert( node->_impl->_state ).second ) + { + node->_impl->_lock = lock; + return node; + } } - } + } + } - return state_node_ptr(); + return state_node_ptr(); } -state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) +state_node_ptr database_impl::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" );; + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + ; - // Needs to be configurable - auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); + // Needs to be 
configurable + auto timeout = std::chrono::system_clock::now() + std::chrono::seconds( 1 ); - state_node_ptr parent_state = get_node( parent_id, lock ); + state_node_ptr parent_state = get_node( parent_id, lock ); - if ( parent_state ) - { - std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); + if( parent_state ) + { + std::unique_lock< std::timed_mutex > cv_lock( parent_state->_impl->_state->cv_mutex(), timeout ); - // We need to own the lock - if ( cv_lock.owns_lock() ) - { - // Check if the node is finalized - bool is_finalized = parent_state->is_finalized(); - - // If the node is finalized, try to wait for the node to be finalized - if ( !is_finalized && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) - is_finalized = parent_state->is_finalized(); - - // Finally, if the node is finalized, we can create a new writable node with the desired parent - if ( is_finalized ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); - - std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); - - // Ensure the parent node still exists in the index and then insert the child node - if ( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() && _index.insert( node->_impl->_state ).second ) - { - return node; - } - } - } - } + // We need to own the lock + if( cv_lock.owns_lock() ) + { + // Check if the node is finalized + bool is_finalized = parent_state->is_finalized(); - return state_node_ptr(); -} - -state_node_ptr database_impl::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + // If the node is finalized, try to wait for the node to be finalized + if( !is_finalized + && parent_state->_impl->_state->cv().wait_until( cv_lock, timeout ) == std::cv_status::no_timeout ) + is_finalized = parent_state->is_finalized(); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); + // Finally, if the node is finalized, we can create a new writable node with the desired parent + if( is_finalized ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = parent_state->_impl->_state->make_child( new_id, header ); - auto new_node = std::make_shared< state_node >(); - new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); + std::unique_lock< std::timed_mutex > index_lock( _index_mutex, timeout ); - if ( _index.insert( new_node->_impl->_state ).second ) - { - new_node->_impl->_lock = lock; - return new_node; - } + // Ensure the parent node still exists in the index and then insert the child node + if( index_lock.owns_lock() && _index.find( parent_id ) != _index.end() + && _index.insert( node->_impl->_state ).second ) + { + return node; + } + } + } + } - return state_node_ptr(); + return state_node_ptr(); } -state_node_ptr database_impl::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) +state_node_ptr 
database_impl::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); - auto new_node = std::make_shared< state_node >(); - new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); + auto new_node = std::make_shared< state_node >(); + new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); - if ( _index.insert( new_node->_impl->_state ).second ) - { - return new_node; - } + if( _index.insert( new_node->_impl->_state ).second ) + { + new_node->_impl->_lock = lock; + return new_node; + } - return state_node_ptr(); + return state_node_ptr(); } -void database_impl::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) +state_node_ptr database_impl::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - - { - std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); - - node->_impl->_state->finalize(); - } - - node->_impl->_state->cv().notify_all(); - - if ( node->revision() > _head->revision() ) - { - _head = node->_impl->_state; - } - else if ( node->revision() == _head->revision() ) - { - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - fork_list forks; - forks.reserve( _fork_heads.size() ); - std::transform( - std::begin( _fork_heads ), std::end( _fork_heads ), std::back_inserter( forks ), - []( const auto& entry ) - { - state_node_ptr s = std::make_shared< state_node >(); - s->_impl->_state = entry.second; - return s; - } - ); - - auto head = get_head_lockless(); - if ( auto new_head = _comp( forks, head, node ); new_head != nullptr ) - { - _head = new_head->_impl->_state; - } - else - { - _head = head->parent()->_impl->_state; - auto head_itr = _fork_heads.find( head->id() ); - if ( head_itr != std::end( _fork_heads ) ) - _fork_heads.erase( head_itr ); - _fork_heads.insert_or_assign( head->parent()->id(), _head ); - } - } + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is 
not open" ); - // When node is finalized, parent node needs to be removed from heads, if it exists. - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - if ( node->parent_id() != _head->id() ) - { - auto parent_itr = _fork_heads.find( node->parent_id() ); - if ( parent_itr != std::end( _fork_heads ) ) - _fork_heads.erase( parent_itr ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + KOINOS_ASSERT( !node->is_finalized(), illegal_argument, "cannot clone finalized node" ); - _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); - } -} + auto new_node = std::make_shared< state_node >(); + new_node->_impl->_state = node->_impl->_state->clone( new_id, header ); -void database_impl::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ("n", node_id) ); - - { - std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); - - node->_impl->_state->finalize(); - } - - node->_impl->_state->cv().notify_all(); - - if ( node->revision() > _head->revision() ) - { - _head = node->_impl->_state; - } - else if ( node->revision() == _head->revision() ) - { - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - fork_list forks; - forks.reserve( _fork_heads.size() ); - std::transform( - std::begin( _fork_heads ), std::end( _fork_heads ), std::back_inserter( forks ), - []( const auto& entry ) - { - state_node_ptr s = std::make_shared< state_node >(); - s->_impl->_state = entry.second; - return s; - } - ); - - auto head = get_head_lockless(); - if ( auto new_head = _comp( forks, head, node ); new_head != nullptr ) - { - _head = new_head->_impl->_state; - } - else - { - _head = head->parent()->_impl->_state; - auto head_itr = _fork_heads.find( head->id() ); - if ( head_itr != std::end( _fork_heads ) ) - _fork_heads.erase( head_itr ); - _fork_heads.insert_or_assign( head->parent()->id(), _head ); - } - } + if( _index.insert( new_node->_impl->_state ).second ) + { + return new_node; + } - // When node is finalized, parent node needs to be removed from heads, if it exists. 
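For context on the finalization handshake shown here: finalize_node() marks the delta finalized while holding its cv_mutex and then notifies the condition variable, which is what create_writable_node() waits on (bounded by the hard-coded one-second timeout noted above) before it will attach a child. A sketch of the producer-side flow, illustrative only and not part of this diff; the helper name is hypothetical.

  // Hypothetical helper, for illustration only: append a child of `parent`,
  // which must be (or become) finalized before the child can be created.
  state_node_ptr append_child( database_impl& db,
                               const state_node_id& parent,
                               const state_node_id& child,
                               const protocol::block_header& header )
  {
    auto lock = db.get_shared_lock();

    // Notifies any thread blocked in create_writable_node() on this parent.
    db.finalize_node( parent, lock );

    // Returns a null pointer if the parent is missing, still unfinalized after
    // the timeout, or no longer present in the index.
    return db.create_writable_node( parent, child, header, lock );
  }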
- std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - if ( node->parent_id() != _head->id() ) - { - auto parent_itr = _fork_heads.find( node->parent_id() ); - if ( parent_itr != std::end( _fork_heads ) ) - _fork_heads.erase( parent_itr ); - - _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); - } -} - -void database_impl::discard_node( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist, const shared_lock_ptr& lock ) -{ - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - discard_node_lockless( node_id, whitelist ); + return state_node_ptr(); } -void database_impl::discard_node( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist, const unique_lock_ptr& lock ) +void database_impl::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - discard_node_lockless( node_id, whitelist ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + + { + std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); + + node->_impl->_state->finalize(); + } + + node->_impl->_state->cv().notify_all(); + + if( node->revision() > _head->revision() ) + { + _head = node->_impl->_state; + } + else if( node->revision() == _head->revision() ) + { + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + fork_list forks; + forks.reserve( _fork_heads.size() ); + std::transform( std::begin( _fork_heads ), + std::end( _fork_heads ), + std::back_inserter( forks ), + []( const auto& entry ) + { + state_node_ptr s = std::make_shared< state_node >(); + s->_impl->_state = entry.second; + return s; + } ); + + auto head = get_head_lockless(); + if( auto new_head = _comp( forks, head, node ); new_head != nullptr ) + { + _head = new_head->_impl->_state; + } + else + { + _head = head->parent()->_impl->_state; + auto head_itr = _fork_heads.find( head->id() ); + if( head_itr != std::end( _fork_heads ) ) + _fork_heads.erase( head_itr ); + _fork_heads.insert_or_assign( head->parent()->id(), _head ); + } + } + + // When node is finalized, parent node needs to be removed from heads, if it exists. 
+ std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + if( node->parent_id() != _head->id() ) + { + auto parent_itr = _fork_heads.find( node->parent_id() ); + if( parent_itr != std::end( _fork_heads ) ) + _fork_heads.erase( parent_itr ); + + _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); + } } -void database_impl::discard_node_lockless( const state_node_id& node_id, const std::unordered_set< state_node_id >& whitelist ) +void database_impl::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) { - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto node = get_node_lockless( node_id ); - - if( !node ) return; - - KOINOS_ASSERT( node_id != _root->id(), illegal_argument, "cannot discard root node" ); - - std::vector< state_node_id > remove_queue{ node_id }; - const auto& previdx = _index.template get< by_parent >(); - const auto head_id = _head->id(); - - for( uint32_t i = 0; i < remove_queue.size(); ++i ) - { - KOINOS_ASSERT( remove_queue[ i ] != head_id, cannot_discard, "cannot discard a node that would result in discarding of head" ); - - auto previtr = previdx.lower_bound( remove_queue[ i ] ); - while ( previtr != previdx.end() && (*previtr)->parent_id() == remove_queue[ i ] ) + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found.", ( "n", node_id ) ); + + { + std::lock_guard< std::timed_mutex > index_lock( node->_impl->_state->cv_mutex() ); + + node->_impl->_state->finalize(); + } + + node->_impl->_state->cv().notify_all(); + + if( node->revision() > _head->revision() ) + { + _head = node->_impl->_state; + } + else if( node->revision() == _head->revision() ) + { + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + fork_list forks; + forks.reserve( _fork_heads.size() ); + std::transform( std::begin( _fork_heads ), + std::end( _fork_heads ), + std::back_inserter( forks ), + []( const auto& entry ) + { + state_node_ptr s = std::make_shared< state_node >(); + s->_impl->_state = entry.second; + return s; + } ); + + auto head = get_head_lockless(); + if( auto new_head = _comp( forks, head, node ); new_head != nullptr ) + { + _head = new_head->_impl->_state; + } + else + { + _head = head->parent()->_impl->_state; + auto head_itr = _fork_heads.find( head->id() ); + if( head_itr != std::end( _fork_heads ) ) + _fork_heads.erase( head_itr ); + _fork_heads.insert_or_assign( head->parent()->id(), _head ); + } + } + + // When node is finalized, parent node needs to be removed from heads, if it exists. 
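It may help to spell out the fork-choice behavior around this point: when the finalized node only ties the current head's revision, the comparator installed at open() time picks the winner from the current fork heads, and a null result makes head fall back to its parent. The comparator comes from the fork_resolution_algorithm switch earlier in this file (block_time, pob, or fifo). A sketch of selecting it, illustrative only and not part of this diff; the variable names are hypothetical.

  // Hypothetical setup, for illustration only: resolve ties between equal-height
  // forks with the proof-of-burn comparator (maps to pob_comparator above).
  auto lock = db.get_unique_lock();
  db.open( state_dir, init_fn, fork_resolution_algorithm::pob, lock );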
+ std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + if( node->parent_id() != _head->id() ) + { + auto parent_itr = _fork_heads.find( node->parent_id() ); + if( parent_itr != std::end( _fork_heads ) ) + _fork_heads.erase( parent_itr ); + + _fork_heads.insert_or_assign( node->id(), node->_impl->_state ); + } +} + +void database_impl::discard_node( const state_node_id& node_id, + const std::unordered_set< state_node_id >& whitelist, + const shared_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + discard_node_lockless( node_id, whitelist ); +} + +void database_impl::discard_node( const state_node_id& node_id, + const std::unordered_set< state_node_id >& whitelist, + const unique_lock_ptr& lock ) +{ + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + discard_node_lockless( node_id, whitelist ); +} + +void database_impl::discard_node_lockless( const state_node_id& node_id, + const std::unordered_set< state_node_id >& whitelist ) +{ + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto node = get_node_lockless( node_id ); + + if( !node ) + return; + + KOINOS_ASSERT( node_id != _root->id(), illegal_argument, "cannot discard root node" ); + + std::vector< state_node_id > remove_queue{ node_id }; + const auto& previdx = _index.template get< by_parent >(); + const auto head_id = _head->id(); + + for( uint32_t i = 0; i < remove_queue.size(); ++i ) + { + KOINOS_ASSERT( remove_queue[ i ] != head_id, + cannot_discard, + "cannot discard a node that would result in discarding of head" ); + + auto previtr = previdx.lower_bound( remove_queue[ i ] ); + while( previtr != previdx.end() && ( *previtr )->parent_id() == remove_queue[ i ] ) + { + // Do not remove nodes on the whitelist + if( whitelist.find( ( *previtr )->id() ) == whitelist.end() ) { - // Do not remove nodes on the whitelist - if ( whitelist.find( (*previtr)->id() ) == whitelist.end() ) - { - remove_queue.push_back( (*previtr)->id() ); - } - - ++previtr; - } - - // We may discard one or more fork heads when discarding a minority fork tree - // For completeness, we'll check every node to see if it is a fork head - auto head_itr = _fork_heads.find( remove_queue[ i ] ); - if ( head_itr != _fork_heads.end() ) - { - _fork_heads.erase( head_itr ); + remove_queue.push_back( ( *previtr )->id() ); } - } - - for( const auto& id : remove_queue ) - { - auto itr = _index.find( id ); - if ( itr != _index.end() ) - _index.erase( itr ); - } - // When node is discarded, if the parent node is not a parent of other nodes (no forks), add it to heads. 
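A note on discard_node_lockless(), shown above in both its old and new form: it removes the target and every descendant reachable through the by_parent index, refuses to discard the root or anything that would take head with it, skips whitelisted ids, and re-registers the parent as a fork head when it has no surviving children. A caller-side sketch, illustrative only and not part of this diff; the helper name is hypothetical.

  // Hypothetical helper, for illustration only: drop an abandoned fork.
  void prune_fork( database_impl& db, const state_node_id& stale_fork )
  {
    auto lock = db.get_shared_lock();

    // Empty whitelist: the node and all of its descendants are discarded.
    db.discard_node( stale_fork, std::unordered_set< state_node_id >{}, lock );
  }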
- auto fork_itr = previdx.find( node->parent_id() ); - if ( fork_itr == previdx.end() ) - { - auto parent_itr = _index.find( node->parent_id() ); - KOINOS_ASSERT( parent_itr != _index.end(), internal_error, "discarded parent node not found in node index" ); - _fork_heads.insert_or_assign( (*parent_itr)->id(), *parent_itr ); - } + ++previtr; + } + + // We may discard one or more fork heads when discarding a minority fork tree + // For completeness, we'll check every node to see if it is a fork head + auto head_itr = _fork_heads.find( remove_queue[ i ] ); + if( head_itr != _fork_heads.end() ) + { + _fork_heads.erase( head_itr ); + } + } + + for( const auto& id: remove_queue ) + { + auto itr = _index.find( id ); + if( itr != _index.end() ) + _index.erase( itr ); + } + + // When node is discarded, if the parent node is not a parent of other nodes (no forks), add it to heads. + auto fork_itr = previdx.find( node->parent_id() ); + if( fork_itr == previdx.end() ) + { + auto parent_itr = _index.find( node->parent_id() ); + KOINOS_ASSERT( parent_itr != _index.end(), internal_error, "discarded parent node not found in node index" ); + _fork_heads.insert_or_assign( ( *parent_itr )->id(), *parent_itr ); + } } void database_impl::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" );; - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + ; + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::unique_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - // If the node_id to commit is the root id, return. It is already committed. - if ( node_id == _root->id() ) - return; + // If the node_id to commit is the root id, return. It is already committed. 
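For completeness, the commit path below makes the chosen node irreversible: it becomes the new root, its accumulated delta is committed to the backend via state_delta::commit(), and the old root's other descendants are discarded, with the whitelist keeping the newly committed branch alive. A caller-side sketch, illustrative only and not part of this diff; the helper name is hypothetical.

  // Hypothetical helper, for illustration only: advance irreversibility.
  void make_irreversible( database_impl& db, const state_node_id& block_id )
  {
    // Committing is a structural change, so it requires the unique lock.
    auto lock = db.get_unique_lock();
    db.commit_node( block_id, lock );
  }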
+ if( node_id == _root->id() ) + return; - auto node = get_node_lockless( node_id ); - KOINOS_ASSERT( node, illegal_argument, "node ${n} not found", ("n", node_id) ); + auto node = get_node_lockless( node_id ); + KOINOS_ASSERT( node, illegal_argument, "node ${n} not found", ( "n", node_id ) ); - auto old_root = _root; - _root = node->_impl->_state; + auto old_root = _root; + _root = node->_impl->_state; - _index.modify( _index.find( node_id ), []( state_delta_ptr& n ){ n->commit(); } ); + _index.modify( _index.find( node_id ), + []( state_delta_ptr& n ) + { + n->commit(); + } ); - std::unordered_set< state_node_id > whitelist{ node_id }; - discard_node_lockless( old_root->id(), whitelist ); + std::unordered_set< state_node_id > whitelist{ node_id }; + discard_node_lockless( old_root->id(), whitelist ); } state_node_ptr database_impl::get_head( const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto head = get_head_lockless(); - if ( head ) - head->_impl->_lock = lock; + auto head = get_head_lockless(); + if( head ) + head->_impl->_lock = lock; - return head; + return head; } state_node_ptr database_impl::get_head( const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto head = get_head_lockless(); + auto head = get_head_lockless(); - return head; + return head; } state_node_ptr database_impl::get_head_lockless() const { - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto head = std::make_shared< state_node >(); - head->_impl->_state = _head; - return head; + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto head = std::make_shared< state_node >(); + head->_impl->_state = _head; + return head; } std::vector< state_node_ptr > database_impl::get_fork_heads( const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > fork_heads; - fork_heads.reserve( _fork_heads.size() ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > fork_heads; + fork_heads.reserve( _fork_heads.size() ); - for( auto& head : _fork_heads ) - { - auto fork_head = std::make_shared< state_node >(); - fork_head->_impl->_state = head.second; - fork_head->_impl->_lock = lock; - fork_heads.push_back( fork_head ); - } + for( auto& head: _fork_heads ) + { + auto fork_head = std::make_shared< state_node >(); + fork_head->_impl->_state = 
head.second; + fork_head->_impl->_lock = lock; + fork_heads.push_back( fork_head ); + } - return fork_heads; + return fork_heads; } std::vector< state_node_ptr > database_impl::get_fork_heads( const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > fork_heads; - fork_heads.reserve( _fork_heads.size() ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + std::shared_lock< std::shared_mutex > fork_heads_lock( _fork_heads_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > fork_heads; + fork_heads.reserve( _fork_heads.size() ); - for( auto& head : _fork_heads ) - { - auto fork_head = std::make_shared< state_node >(); - fork_head->_impl->_state = head.second; - fork_heads.push_back( fork_head ); - } + for( auto& head: _fork_heads ) + { + auto fork_head = std::make_shared< state_node >(); + fork_head->_impl->_state = head.second; + fork_heads.push_back( fork_head ); + } - return fork_heads; + return fork_heads; } std::vector< state_node_ptr > database_impl::get_all_nodes( const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > nodes; - nodes.reserve( _index.size() ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > nodes; + nodes.reserve( _index.size() ); - for ( const auto& delta : _index ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = delta; - node->_impl->_lock = lock; - nodes.push_back( node ); - } + for( const auto& delta: _index ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = delta; + node->_impl->_lock = lock; + nodes.push_back( node ); + } - return nodes; + return nodes; } std::vector< state_node_ptr > database_impl::get_all_nodes( const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - std::vector< state_node_ptr > nodes; - nodes.reserve( _index.size() ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + std::vector< state_node_ptr > nodes; + nodes.reserve( _index.size() ); - for ( const auto& delta : _index ) - { - auto node = std::make_shared< state_node >(); - node->_impl->_state = delta; - nodes.push_back( node ); - } + for( const auto& delta: _index ) + { + auto node = std::make_shared< state_node >(); + node->_impl->_state = delta; + 
nodes.push_back( node ); + } - return nodes; + return nodes; } state_node_ptr database_impl::get_root( const shared_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_shared_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto root = get_root_lockless(); - if ( root ) - root->_impl->_lock = lock; + auto root = get_root_lockless(); + if( root ) + root->_impl->_lock = lock; - return root; + return root; } state_node_ptr database_impl::get_root( const unique_lock_ptr& lock ) const { - KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); - std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); + KOINOS_ASSERT( verify_unique_lock( lock ), illegal_argument, "database is not properly locked" ); + std::lock_guard< std::timed_mutex > index_lock( _index_mutex ); - auto root = get_root_lockless(); + auto root = get_root_lockless(); - return root; + return root; } state_node_ptr database_impl::get_root_lockless() const { - KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); - auto root = std::make_shared< state_node >(); - root->_impl->_state = _root; - return root; + KOINOS_ASSERT( is_open(), database_not_open, "database is not open" ); + auto root = std::make_shared< state_node >(); + root->_impl->_state = _root; + return root; } bool database_impl::is_open() const { - return (bool)_root && (bool)_head; + return (bool)_root && (bool)_head; } const object_value* state_node_impl::get_object( const object_space& space, const object_key& key ) const { - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); - auto pobj = merge_state( _state ).find( key_string ); + auto pobj = merge_state( _state ).find( key_string ); - if( pobj != nullptr ) - { - return pobj; - } + if( pobj != nullptr ) + { + return pobj; + } - return nullptr; + return nullptr; } -std::pair< const object_value*, const object_key > state_node_impl::get_next_object( const object_space& space, const object_key& key ) const +std::pair< const object_value*, const object_key > state_node_impl::get_next_object( const object_space& space, + const object_key& key ) const { - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); - auto state = merge_state( _state ); - auto it = state.lower_bound( key_string ); + auto state = merge_state( _state ); + auto it = state.lower_bound( key_string ); - if ( it != state.end() && it.key() == key_string ) - { - it++; - } + if( it != state.end() && it.key() == key_string ) + { + it++; + } - if( it != state.end() ) - { - chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); + if( it != state.end() ) + { + chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); - if ( next_key.space() == space ) - { - return { &*it, 
next_key.key() }; - } - } + if( next_key.space() == space ) + { + return { &*it, next_key.key() }; + } + } - return { nullptr, null_key }; + return { nullptr, null_key }; } -std::pair< const object_value*, const object_key > state_node_impl::get_prev_object( const object_space& space, const object_key& key ) const +std::pair< const object_value*, const object_key > state_node_impl::get_prev_object( const object_space& space, + const object_key& key ) const { - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); - auto state = merge_state( _state ); - auto it = state.lower_bound( key_string ); + auto state = merge_state( _state ); + auto it = state.lower_bound( key_string ); - if( it != state.begin() ) - { - --it; - chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); + if( it != state.begin() ) + { + --it; + chain::database_key next_key = util::converter::to< chain::database_key >( it.key() ); - if ( next_key.space() == space ) - { - return { &*it, next_key.key() }; - } - } + if( next_key.space() == space ) + { + return { &*it, next_key.key() }; + } + } - return { nullptr, null_key }; + return { nullptr, null_key }; } int64_t state_node_impl::put_object( const object_space& space, const object_key& key, const object_value* val ) { - KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); + KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); - int64_t bytes_used = 0; - auto pobj = merge_state( _state ).find( key_string ); + int64_t bytes_used = 0; + auto pobj = merge_state( _state ).find( key_string ); - if ( pobj != nullptr ) - bytes_used -= pobj->size(); - else - bytes_used += key_string.size(); + if( pobj != nullptr ) + bytes_used -= pobj->size(); + else + bytes_used += key_string.size(); - bytes_used += val->size(); - _state->put( key_string, *val ); + bytes_used += val->size(); + _state->put( key_string, *val ); - return bytes_used; + return bytes_used; } int64_t state_node_impl::remove_object( const object_space& space, const object_key& key ) { - KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); + KOINOS_ASSERT( !_state->is_finalized(), node_finalized, "cannot write to a finalized node" ); - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( key ); - auto key_string = util::converter::as< std::string >( db_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( key ); + auto key_string = util::converter::as< std::string >( db_key ); - int64_t bytes_used = 0; - auto pobj = merge_state( _state ).find( key_string ); + int64_t bytes_used = 0; + auto pobj = merge_state( _state ).find( key_string ); - if ( pobj != nullptr ) - { - bytes_used -= pobj->size(); - bytes_used -= key_string.size(); - } + if( pobj != nullptr ) + { + bytes_used -= pobj->size(); + bytes_used -= key_string.size(); + } - _state->erase( 
key_string ); + _state->erase( key_string ); - return bytes_used; + return bytes_used; } crypto::multihash state_node_impl::merkle_root() const { - return _state->merkle_root(); + return _state->merkle_root(); } std::vector< protocol::state_delta_entry > state_node_impl::get_delta_entries() const { - return _state->get_delta_entries(); + return _state->get_delta_entries(); } -} // detail +} // namespace detail + +abstract_state_node::abstract_state_node(): + _impl( new detail::state_node_impl() ) +{} -abstract_state_node::abstract_state_node() : _impl( new detail::state_node_impl() ) {} abstract_state_node::~abstract_state_node() {} const object_value* abstract_state_node::get_object( const object_space& space, const object_key& key ) const { - return _impl->get_object( space, key ); + return _impl->get_object( space, key ); } -std::pair< const object_value*, const object_key > abstract_state_node::get_next_object( const object_space& space, const object_key& key ) const +std::pair< const object_value*, const object_key > abstract_state_node::get_next_object( const object_space& space, + const object_key& key ) const { - return _impl->get_next_object( space, key ); + return _impl->get_next_object( space, key ); } -std::pair< const object_value*, const object_key > abstract_state_node::get_prev_object( const object_space& space, const object_key& key ) const +std::pair< const object_value*, const object_key > abstract_state_node::get_prev_object( const object_space& space, + const object_key& key ) const { - return _impl->get_prev_object( space, key ); + return _impl->get_prev_object( space, key ); } int64_t abstract_state_node::put_object( const object_space& space, const object_key& key, const object_value* val ) { - return _impl->put_object( space, key, val ); + return _impl->put_object( space, key, val ); } int64_t abstract_state_node::remove_object( const object_space& space, const object_key& key ) { - return _impl->remove_object( space, key ); + return _impl->remove_object( space, key ); } bool abstract_state_node::is_finalized() const { - return _impl->_state->is_finalized(); + return _impl->_state->is_finalized(); } crypto::multihash abstract_state_node::merkle_root() const { - KOINOS_ASSERT( is_finalized(), koinos::exception, "node must be finalized to calculate merkle root" ); - return _impl->merkle_root(); + KOINOS_ASSERT( is_finalized(), koinos::exception, "node must be finalized to calculate merkle root" ); + return _impl->merkle_root(); } std::vector< protocol::state_delta_entry > abstract_state_node::get_delta_entries() const { - return _impl->get_delta_entries(); + return _impl->get_delta_entries(); } anonymous_state_node_ptr abstract_state_node::create_anonymous_node() { - auto anonymous_node = std::make_shared< anonymous_state_node >(); - anonymous_node->_parent = shared_from_derived(); - anonymous_node->_impl->_state = _impl->_state->make_child(); - anonymous_node->_impl->_lock = _impl->_lock; - return anonymous_node; + auto anonymous_node = std::make_shared< anonymous_state_node >(); + anonymous_node->_parent = shared_from_derived(); + anonymous_node->_impl->_state = _impl->_state->make_child(); + anonymous_node->_impl->_lock = _impl->_lock; + return anonymous_node; } -state_node::state_node() : abstract_state_node() {} +state_node::state_node(): + abstract_state_node() +{} + state_node::~state_node() {} const state_node_id& state_node::id() const { - return _impl->_state->id(); + return _impl->_state->id(); } const state_node_id& state_node::parent_id() const { - 
return _impl->_state->parent_id(); + return _impl->_state->parent_id(); } uint64_t state_node::revision() const { - return _impl->_state->revision(); + return _impl->_state->revision(); } abstract_state_node_ptr state_node::parent() const { - auto parent_delta = _impl->_state->parent(); - if ( parent_delta ) - { - auto parent_node = std::make_shared< state_node >(); - parent_node->_impl->_state = parent_delta; - parent_node->_impl->_lock = _impl->_lock; - return parent_node; - } + auto parent_delta = _impl->_state->parent(); + if( parent_delta ) + { + auto parent_node = std::make_shared< state_node >(); + parent_node->_impl->_state = parent_delta; + parent_node->_impl->_lock = _impl->_lock; + return parent_node; + } - return abstract_state_node_ptr(); + return abstract_state_node_ptr(); } const protocol::block_header& state_node::block_header() const { - return _impl->_state->block_header(); + return _impl->_state->block_header(); } abstract_state_node_ptr state_node::shared_from_derived() { - return shared_from_this(); + return shared_from_this(); } -anonymous_state_node::anonymous_state_node() : abstract_state_node() {} +anonymous_state_node::anonymous_state_node(): + abstract_state_node() +{} + anonymous_state_node::anonymous_state_node::~anonymous_state_node() {} const state_node_id& anonymous_state_node::id() const { - return _parent->id(); + return _parent->id(); } const state_node_id& anonymous_state_node::parent_id() const { - return _parent->parent_id(); + return _parent->parent_id(); } uint64_t anonymous_state_node::revision() const { - return _parent->revision(); + return _parent->revision(); } abstract_state_node_ptr anonymous_state_node::parent() const { - return _parent; + return _parent; } const protocol::block_header& anonymous_state_node::block_header() const { - return _parent->block_header(); + return _parent->block_header(); } void anonymous_state_node::commit() { - KOINOS_ASSERT( !_parent->is_finalized(), node_finalized, "cannot commit to a finalized node" ); - _impl->_state->squash(); - reset(); + KOINOS_ASSERT( !_parent->is_finalized(), node_finalized, "cannot commit to a finalized node" ); + _impl->_state->squash(); + reset(); } void anonymous_state_node::reset() { - _impl->_state = _impl->_state->make_child(); + _impl->_state = _impl->_state->make_child(); } abstract_state_node_ptr anonymous_state_node::shared_from_derived() { - return shared_from_this(); + return shared_from_this(); } - state_node_ptr fifo_comparator( fork_list& forks, state_node_ptr current_head, state_node_ptr new_head ) { - return current_head; + return current_head; } state_node_ptr block_time_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) { - return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? new_block : head_block; + return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? new_block : head_block; } state_node_ptr pob_comparator( fork_list& forks, state_node_ptr head_block, state_node_ptr new_block ) { - if ( head_block->block_header().signer() != new_block->block_header().signer() ) - return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? 
new_block : head_block; - - auto it = std::find_if( std::begin( forks ), std::end( forks ), [&]( state_node_ptr p ) { return p->id() == head_block->id(); } ); - if ( it != std::end( forks ) ) - forks.erase( it ); - - struct { - bool operator()( abstract_state_node_ptr a, abstract_state_node_ptr b ) const - { - if ( a->revision() > b->revision() ) - return true; - else if ( a->revision() < b->revision() ) - return false; + if( head_block->block_header().signer() != new_block->block_header().signer() ) + return new_block->block_header().timestamp() < head_block->block_header().timestamp() ? new_block : head_block; + + auto it = std::find_if( std::begin( forks ), + std::end( forks ), + [ & ]( state_node_ptr p ) + { + return p->id() == head_block->id(); + } ); + if( it != std::end( forks ) ) + forks.erase( it ); + + struct + { + bool operator()( abstract_state_node_ptr a, abstract_state_node_ptr b ) const + { + if( a->revision() > b->revision() ) + return true; + else if( a->revision() < b->revision() ) + return false; + + if( a->block_header().timestamp() < b->block_header().timestamp() ) + return true; + else if( a->block_header().timestamp() > b->block_header().timestamp() ) + return false; + + if( a->id() < b->id() ) + return true; - if ( a->block_header().timestamp() < b->block_header().timestamp() ) - return true; - else if ( a->block_header().timestamp() > b->block_header().timestamp() ) - return false; - - if ( a->id() < b->id() ) - return true; - - return false; - } - } priority_algorithm; + return false; + } + } priority_algorithm; - if ( std::size( forks ) ) - { - std::sort( std::begin( forks ), std::end( forks ), priority_algorithm ); - it = std::begin( forks ); - return priority_algorithm( head_block->parent(), *it ) ? state_node_ptr() : *it; - } + if( std::size( forks ) ) + { + std::sort( std::begin( forks ), std::end( forks ), priority_algorithm ); + it = std::begin( forks ); + return priority_algorithm( head_block->parent(), *it ) ? state_node_ptr() : *it; + } - return state_node_ptr(); + return state_node_ptr(); } -database::database() : impl( new detail::database_impl() ) {} +database::database(): + impl( new detail::database_impl() ) +{} + database::~database() {} shared_lock_ptr database::get_shared_lock() const { - return impl->get_shared_lock(); + return impl->get_shared_lock(); } unique_lock_ptr database::get_unique_lock() const { - return impl->get_unique_lock(); + return impl->get_unique_lock(); } -void database::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, fork_resolution_algorithm algo, const unique_lock_ptr& lock ) +void database::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + fork_resolution_algorithm algo, + const unique_lock_ptr& lock ) { - impl->open( p, init, algo, lock ? lock : get_unique_lock() ); + impl->open( p, init, algo, lock ? lock : get_unique_lock() ); } -void database::open( const std::optional< std::filesystem::path >& p, genesis_init_function init, state_node_comparator_function comp, const unique_lock_ptr& lock ) +void database::open( const std::optional< std::filesystem::path >& p, + genesis_init_function init, + state_node_comparator_function comp, + const unique_lock_ptr& lock ) { - impl->open( p, init, comp, lock ? lock : get_unique_lock() ); + impl->open( p, init, comp, lock ? lock : get_unique_lock() ); } void database::close( const unique_lock_ptr& lock ) { - impl->close( lock ? lock : get_unique_lock() ); + impl->close( lock ? 
lock : get_unique_lock() ); } void database::reset( const unique_lock_ptr& lock ) { - impl->reset( lock ? lock : get_unique_lock() ); + impl->reset( lock ? lock : get_unique_lock() ); } -state_node_ptr database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const +state_node_ptr +database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const shared_lock_ptr& lock ) const { - return impl->get_node_at_revision( revision, child_id, lock ); + return impl->get_node_at_revision( revision, child_id, lock ); } state_node_ptr database::get_node_at_revision( uint64_t revision, const shared_lock_ptr& lock ) const { - static const state_node_id null_id; - return impl->get_node_at_revision( revision, null_id, lock ); + static const state_node_id null_id; + return impl->get_node_at_revision( revision, null_id, lock ); } -state_node_ptr database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const +state_node_ptr +database::get_node_at_revision( uint64_t revision, const state_node_id& child_id, const unique_lock_ptr& lock ) const { - return impl->get_node_at_revision( revision, child_id, lock ); + return impl->get_node_at_revision( revision, child_id, lock ); } state_node_ptr database::get_node_at_revision( uint64_t revision, const unique_lock_ptr& lock ) const { - static const state_node_id null_id; - return impl->get_node_at_revision( revision, null_id, lock ); + static const state_node_id null_id; + return impl->get_node_at_revision( revision, null_id, lock ); } state_node_ptr database::get_node( const state_node_id& node_id, const shared_lock_ptr& lock ) const { - return impl->get_node( node_id, lock ); + return impl->get_node( node_id, lock ); } state_node_ptr database::get_node( const state_node_id& node_id, const unique_lock_ptr& lock ) const { - return impl->get_node( node_id, lock ); + return impl->get_node( node_id, lock ); } -state_node_ptr database::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) +state_node_ptr database::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) { - return impl->create_writable_node( parent_id, new_id, header, lock ); + return impl->create_writable_node( parent_id, new_id, header, lock ); } -state_node_ptr database::create_writable_node( const state_node_id& parent_id, const state_node_id& new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) +state_node_ptr database::create_writable_node( const state_node_id& parent_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) { - return impl->create_writable_node( parent_id, new_id, header, lock ); + return impl->create_writable_node( parent_id, new_id, header, lock ); } -state_node_ptr database::clone_node( const state_node_id& node_id, const state_node_id& new_id, const protocol::block_header& header, const shared_lock_ptr& lock ) +state_node_ptr database::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const shared_lock_ptr& lock ) { - return impl->clone_node( node_id, new_id, header, lock ); + return impl->clone_node( node_id, new_id, header, lock ); } -state_node_ptr database::clone_node( const state_node_id& node_id, const state_node_id& 
new_id, const protocol::block_header& header, const unique_lock_ptr& lock ) +state_node_ptr database::clone_node( const state_node_id& node_id, + const state_node_id& new_id, + const protocol::block_header& header, + const unique_lock_ptr& lock ) { - return impl->clone_node( node_id, new_id, header, lock ); + return impl->clone_node( node_id, new_id, header, lock ); } void database::finalize_node( const state_node_id& node_id, const shared_lock_ptr& lock ) { - impl->finalize_node( node_id, lock ); + impl->finalize_node( node_id, lock ); } void database::finalize_node( const state_node_id& node_id, const unique_lock_ptr& lock ) { - impl->finalize_node( node_id, lock ); + impl->finalize_node( node_id, lock ); } void database::discard_node( const state_node_id& node_id, const shared_lock_ptr& lock ) { - static const std::unordered_set< state_node_id > whitelist; - impl->discard_node( node_id, whitelist, lock ); + static const std::unordered_set< state_node_id > whitelist; + impl->discard_node( node_id, whitelist, lock ); } void database::discard_node( const state_node_id& node_id, const unique_lock_ptr& lock ) { - static const std::unordered_set< state_node_id > whitelist; - impl->discard_node( node_id, whitelist, lock ); + static const std::unordered_set< state_node_id > whitelist; + impl->discard_node( node_id, whitelist, lock ); } void database::commit_node( const state_node_id& node_id, const unique_lock_ptr& lock ) { - impl->commit_node( node_id, lock ? lock : get_unique_lock() ); + impl->commit_node( node_id, lock ? lock : get_unique_lock() ); } state_node_ptr database::get_head( const shared_lock_ptr& lock ) const { - return impl->get_head( lock ); + return impl->get_head( lock ); } state_node_ptr database::get_head( const unique_lock_ptr& lock ) const { - return impl->get_head( lock ); + return impl->get_head( lock ); } std::vector< state_node_ptr > database::get_fork_heads( const shared_lock_ptr& lock ) const { - return impl->get_fork_heads( lock ); + return impl->get_fork_heads( lock ); } std::vector< state_node_ptr > database::get_fork_heads( const unique_lock_ptr& lock ) const { - return impl->get_fork_heads( lock ); + return impl->get_fork_heads( lock ); } std::vector< state_node_ptr > database::get_all_nodes( const shared_lock_ptr& lock ) const { - return impl->get_all_nodes( lock ); + return impl->get_all_nodes( lock ); } std::vector< state_node_ptr > database::get_all_nodes( const unique_lock_ptr& lock ) const { - return impl->get_all_nodes( lock ); + return impl->get_all_nodes( lock ); } state_node_ptr database::get_root( const shared_lock_ptr& lock ) const { - return impl->get_root( lock ); + return impl->get_root( lock ); } state_node_ptr database::get_root( const unique_lock_ptr& lock ) const { - return impl->get_root( lock ); + return impl->get_root( lock ); } -} // koinos::state_db +} // namespace koinos::state_db diff --git a/src/koinos/state_db/state_delta.cpp b/src/koinos/state_db/state_delta.cpp index 9c8367e..32571e6 100644 --- a/src/koinos/state_db/state_delta.cpp +++ b/src/koinos/state_db/state_delta.cpp @@ -9,365 +9,365 @@ using value_type = state_delta::value_type; state_delta::state_delta( const std::optional< std::filesystem::path >& p ) { - if ( p ) - { - auto backend = std::make_shared< backends::rocksdb::rocksdb_backend >(); - backend->open( *p ); - _backend = backend; - } - else - { - _backend = std::make_shared< backends::map::map_backend >(); - } - - _revision = _backend->revision(); - _id = _backend->id(); - _merkle_root = _backend->merkle_root(); + if( 
p ) + { + auto backend = std::make_shared< backends::rocksdb::rocksdb_backend >(); + backend->open( *p ); + _backend = backend; + } + else + { + _backend = std::make_shared< backends::map::map_backend >(); + } + + _revision = _backend->revision(); + _id = _backend->id(); + _merkle_root = _backend->merkle_root(); } void state_delta::put( const key_type& k, const value_type& v ) { - _backend->put( k, v ); + _backend->put( k, v ); } void state_delta::erase( const key_type& k ) { - if ( find( k ) ) - { - _backend->erase( k ); - _removed_objects.insert( k ); - } + if( find( k ) ) + { + _backend->erase( k ); + _removed_objects.insert( k ); + } } const value_type* state_delta::find( const key_type& key ) const { - if ( auto val_ptr = _backend->get( key ); val_ptr ) - return val_ptr; + if( auto val_ptr = _backend->get( key ); val_ptr ) + return val_ptr; - if ( is_removed( key ) ) - return nullptr; + if( is_removed( key ) ) + return nullptr; - return is_root() ? nullptr : _parent->find( key ); + return is_root() ? nullptr : _parent->find( key ); } void state_delta::squash() { - if ( is_root() ) - return; - - // If an object is removed here and exists in the parent, it needs to only be removed in the parent - // If an object is modified here, but removed in the parent, it needs to only be modified in the parent - // These are O(m log n) operations. Because of this, squash should only be called from anonymouse state - // nodes, whose modifications are much smaller - for ( const key_type& r_key : _removed_objects ) - { - _parent->_backend->erase( r_key ); - - if ( !_parent->is_root() ) - { - _parent->_removed_objects.insert( r_key ); - } - } - - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) - { - _parent->_backend->put( itr.key(), *itr ); - - if ( !_parent->is_root() ) - { - _parent->_removed_objects.erase( itr.key() ); - } - } + if( is_root() ) + return; + + // If an object is removed here and exists in the parent, it needs to only be removed in the parent + // If an object is modified here, but removed in the parent, it needs to only be modified in the parent + // These are O(m log n) operations. Because of this, squash should only be called from anonymouse state + // nodes, whose modifications are much smaller + for( const key_type& r_key: _removed_objects ) + { + _parent->_backend->erase( r_key ); + + if( !_parent->is_root() ) + { + _parent->_removed_objects.insert( r_key ); + } + } + + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + _parent->_backend->put( itr.key(), *itr ); + + if( !_parent->is_root() ) + { + _parent->_removed_objects.erase( itr.key() ); + } + } } void state_delta::commit() { - /** - * commit works in two distinct phases. The first is head recursion until we are at the root - * delta. At the root, we grab the backend and begin a write batch that will encompass all - * state writes and the final write of the metadata. - * - * The second phase is popping off the stack, writing state to the backend. After all deltas - * have been written to the backend, we write metadata to the backend and end the write batch. - * - * The result is this delta becomes the new root delta and state is written to the root backend - * atomically. 
- */ - KOINOS_ASSERT( !is_root(), internal_error, "cannot commit root" ); - - std::vector< std::shared_ptr< state_delta > > node_stack; - auto current_node = shared_from_this(); - - while ( current_node ) - { - node_stack.push_back( current_node ); - current_node = current_node->_parent; - } - - // Because we already asserted we were not root, there will always exist a minimum of two nodes in the stack, - // this and root. - auto backend = node_stack.back()->_backend; - node_stack.back()->_backend.reset(); - node_stack.pop_back(); - - // Start the write batch - backend->start_write_batch(); - - // While there are nodes on the stack, write them to the backend - while ( node_stack.size() ) - { - auto& node = node_stack.back(); - - for ( const key_type& r_key : node->_removed_objects ) - { - backend->erase( r_key ); - } - - for ( auto itr = node->_backend->begin(); itr != node->_backend->end(); ++itr ) - { - backend->put( itr.key(), *itr ); - } - - node_stack.pop_back(); - } - - // Update metadata on the backend - backend->set_block_header( block_header() ); - backend->set_revision( _revision ); - backend->set_id( _id ); - backend->set_merkle_root( merkle_root() ); - backend->store_metadata(); - - // End the write batch making the entire merge atomic - backend->end_write_batch(); - - // Reset local variables to match new status as root delta - _removed_objects.clear(); - _backend = backend; - _parent.reset(); + /** + * commit works in two distinct phases. The first is head recursion until we are at the root + * delta. At the root, we grab the backend and begin a write batch that will encompass all + * state writes and the final write of the metadata. + * + * The second phase is popping off the stack, writing state to the backend. After all deltas + * have been written to the backend, we write metadata to the backend and end the write batch. + * + * The result is this delta becomes the new root delta and state is written to the root backend + * atomically. + */ + KOINOS_ASSERT( !is_root(), internal_error, "cannot commit root" ); + + std::vector< std::shared_ptr< state_delta > > node_stack; + auto current_node = shared_from_this(); + + while( current_node ) + { + node_stack.push_back( current_node ); + current_node = current_node->_parent; + } + + // Because we already asserted we were not root, there will always exist a minimum of two nodes in the stack, + // this and root. 
+ auto backend = node_stack.back()->_backend; + node_stack.back()->_backend.reset(); + node_stack.pop_back(); + + // Start the write batch + backend->start_write_batch(); + + // While there are nodes on the stack, write them to the backend + while( node_stack.size() ) + { + auto& node = node_stack.back(); + + for( const key_type& r_key: node->_removed_objects ) + { + backend->erase( r_key ); + } + + for( auto itr = node->_backend->begin(); itr != node->_backend->end(); ++itr ) + { + backend->put( itr.key(), *itr ); + } + + node_stack.pop_back(); + } + + // Update metadata on the backend + backend->set_block_header( block_header() ); + backend->set_revision( _revision ); + backend->set_id( _id ); + backend->set_merkle_root( merkle_root() ); + backend->store_metadata(); + + // End the write batch making the entire merge atomic + backend->end_write_batch(); + + // Reset local variables to match new status as root delta + _removed_objects.clear(); + _backend = backend; + _parent.reset(); } void state_delta::clear() { - _backend->clear(); - _removed_objects.clear(); + _backend->clear(); + _removed_objects.clear(); - _revision = 0; - _id = crypto::multihash::zero( crypto::multicodec::sha2_256 ); + _revision = 0; + _id = crypto::multihash::zero( crypto::multicodec::sha2_256 ); } bool state_delta::is_modified( const key_type& k ) const { - return _backend->get( k ) || _removed_objects.find( k ) != _removed_objects.end(); + return _backend->get( k ) || _removed_objects.find( k ) != _removed_objects.end(); } bool state_delta::is_removed( const key_type& k ) const { - return _removed_objects.find( k ) != _removed_objects.end(); + return _removed_objects.find( k ) != _removed_objects.end(); } bool state_delta::is_root() const { - return !_parent; + return !_parent; } uint64_t state_delta::revision() const { - return _revision; + return _revision; } void state_delta::set_revision( uint64_t revision ) { - _revision = revision; - if ( is_root() ) - { - _backend->set_revision( revision ); - } + _revision = revision; + if( is_root() ) + { + _backend->set_revision( revision ); + } } bool state_delta::is_finalized() const { - return _finalized; + return _finalized; } void state_delta::finalize() { - _finalized = true; + _finalized = true; } std::condition_variable_any& state_delta::cv() { - return _cv; + return _cv; } std::timed_mutex& state_delta::cv_mutex() { - return _cv_mutex; + return _cv_mutex; } crypto::multihash state_delta::merkle_root() const { - if ( !_merkle_root ) - { - std::vector< std::string > object_keys; - object_keys.reserve( _backend->size() + _removed_objects.size() ); - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) - { - object_keys.push_back( itr.key() ); - } - - for ( const auto& removed : _removed_objects ) - { - object_keys.push_back( removed ); - } - - std::sort( - object_keys.begin(), - object_keys.end() - ); - - std::vector< crypto::multihash > merkle_leafs; - merkle_leafs.reserve( object_keys.size() * 2 ); - - for ( const auto& key : object_keys ) - { - merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, key ) ); - auto val_ptr = _backend->get( key ); - merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, val_ptr ? 
*val_ptr : std::string() ) ); - } - - _merkle_root = crypto::merkle_tree( crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - } - - return *_merkle_root; + if( !_merkle_root ) + { + std::vector< std::string > object_keys; + object_keys.reserve( _backend->size() + _removed_objects.size() ); + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + object_keys.push_back( itr.key() ); + } + + for( const auto& removed: _removed_objects ) + { + object_keys.push_back( removed ); + } + + std::sort( object_keys.begin(), object_keys.end() ); + + std::vector< crypto::multihash > merkle_leafs; + merkle_leafs.reserve( object_keys.size() * 2 ); + + for( const auto& key: object_keys ) + { + merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, key ) ); + auto val_ptr = _backend->get( key ); + merkle_leafs.emplace_back( crypto::hash( crypto::multicodec::sha2_256, val_ptr ? *val_ptr : std::string() ) ); + } + + _merkle_root = crypto::merkle_tree( crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + } + + return *_merkle_root; } const protocol::block_header& state_delta::block_header() const { - return _backend->block_header(); + return _backend->block_header(); } std::shared_ptr< state_delta > state_delta::make_child( const state_node_id& id, const protocol::block_header& header ) { - auto child = std::make_shared< state_delta >(); - child->_parent = shared_from_this(); - child->_id = id; - child->_revision = _revision + 1; - child->_backend = std::make_shared< backends::map::map_backend >(); - child->_backend->set_block_header( header ); - - return child; + auto child = std::make_shared< state_delta >(); + child->_parent = shared_from_this(); + child->_id = id; + child->_revision = _revision + 1; + child->_backend = std::make_shared< backends::map::map_backend >(); + child->_backend->set_block_header( header ); + + return child; } std::shared_ptr< state_delta > state_delta::clone( const state_node_id& id, const protocol::block_header& header ) { - auto new_node = std::make_shared< state_delta >(); - new_node->_parent = _parent; - new_node->_backend = _backend->clone(); - new_node->_removed_objects = _removed_objects; + auto new_node = std::make_shared< state_delta >(); + new_node->_parent = _parent; + new_node->_backend = _backend->clone(); + new_node->_removed_objects = _removed_objects; - new_node->_id = id; - new_node->_revision = _revision; - new_node->_merkle_root = _merkle_root; + new_node->_id = id; + new_node->_revision = _revision; + new_node->_merkle_root = _merkle_root; - new_node->_finalized = _finalized; + new_node->_finalized = _finalized; - new_node->_backend->set_id( id ); - new_node->_backend->set_revision( _revision ); - new_node->_backend->set_block_header( header ); + new_node->_backend->set_id( id ); + new_node->_backend->set_revision( _revision ); + new_node->_backend->set_block_header( header ); - if ( _merkle_root ) - { - new_node->_backend->set_merkle_root( *_merkle_root ); - } + if( _merkle_root ) + { + new_node->_backend->set_merkle_root( *_merkle_root ); + } - return new_node; + return new_node; } const std::shared_ptr< backend_type > state_delta::backend() const { - return _backend; + return _backend; } const state_node_id& state_delta::id() const { - return _id; + return _id; } const state_node_id& state_delta::parent_id() const { - static const state_node_id null_id; - return _parent ? _parent->_id : null_id; + static const state_node_id null_id; + return _parent ? 
_parent->_id : null_id; } std::shared_ptr< state_delta > state_delta::parent() const { - return _parent; + return _parent; } bool state_delta::is_empty() const { - if ( _backend->size() ) - return false; - else if ( _parent ) - return _parent->is_empty(); + if( _backend->size() ) + return false; + else if( _parent ) + return _parent->is_empty(); - return true; + return true; } std::shared_ptr< state_delta > state_delta::get_root() { - if ( !is_root() ) - { - if ( _parent->is_root() ) - return _parent; - else - return _parent->get_root(); - } - - return std::shared_ptr< state_delta >(); + if( !is_root() ) + { + if( _parent->is_root() ) + return _parent; + else + return _parent->get_root(); + } + + return std::shared_ptr< state_delta >(); } std::vector< protocol::state_delta_entry > state_delta::get_delta_entries() const { - std::vector< std::string > object_keys; - object_keys.reserve( _backend->size() + _removed_objects.size() ); - for ( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) { - object_keys.push_back( itr.key() ); - } - - for ( const auto &removed : _removed_objects ) { - object_keys.push_back( removed ); - } - - std::sort( object_keys.begin(), object_keys.end() ); - - std::vector< protocol::state_delta_entry > deltas; - deltas.reserve( object_keys.size() ); - - for ( const auto &key : object_keys ) { - protocol::state_delta_entry entry; - - // Deserialize the key into a database_key object - koinos::chain::database_key db_key; - if ( db_key.ParseFromString( key ) ) - { - entry.mutable_object_space()->set_system( db_key.space().system() ); - entry.mutable_object_space()->set_zone( db_key.space().zone() ); - entry.mutable_object_space()->set_id( db_key.space().id() ); - - entry.set_key( db_key.key() ); - auto value = _backend->get( key ); - - // Set the optional field if not null - if ( value != nullptr ) - entry.set_value( *value ); - - deltas.push_back( entry ); - } - } - - return deltas; + std::vector< std::string > object_keys; + object_keys.reserve( _backend->size() + _removed_objects.size() ); + for( auto itr = _backend->begin(); itr != _backend->end(); ++itr ) + { + object_keys.push_back( itr.key() ); + } + + for( const auto& removed: _removed_objects ) + { + object_keys.push_back( removed ); + } + + std::sort( object_keys.begin(), object_keys.end() ); + + std::vector< protocol::state_delta_entry > deltas; + deltas.reserve( object_keys.size() ); + + for( const auto& key: object_keys ) + { + protocol::state_delta_entry entry; + + // Deserialize the key into a database_key object + koinos::chain::database_key db_key; + if( db_key.ParseFromString( key ) ) + { + entry.mutable_object_space()->set_system( db_key.space().system() ); + entry.mutable_object_space()->set_zone( db_key.space().zone() ); + entry.mutable_object_space()->set_id( db_key.space().id() ); + + entry.set_key( db_key.key() ); + auto value = _backend->get( key ); + + // Set the optional field if not null + if( value != nullptr ) + entry.set_value( *value ); + + deltas.push_back( entry ); + } + } + + return deltas; } -} // koinos::state_db::detail +} // namespace koinos::state_db::detail diff --git a/src/koinos/state_db/state_delta.hpp b/src/koinos/state_db/state_delta.hpp index 817b702..82f1381 100644 --- a/src/koinos/state_db/state_delta.hpp +++ b/src/koinos/state_db/state_delta.hpp @@ -16,73 +16,74 @@ namespace koinos::state_db::detail { - class state_delta : public std::enable_shared_from_this< state_delta > - { - public: - using backend_type = backends::abstract_backend; - using key_type = 
backend_type::key_type; - using value_type = backend_type::value_type; +class state_delta: public std::enable_shared_from_this< state_delta > +{ +public: + using backend_type = backends::abstract_backend; + using key_type = backend_type::key_type; + using value_type = backend_type::value_type; - private: - std::shared_ptr< state_delta > _parent; +private: + std::shared_ptr< state_delta > _parent; - std::shared_ptr< backend_type > _backend; - std::unordered_set< key_type > _removed_objects; + std::shared_ptr< backend_type > _backend; + std::unordered_set< key_type > _removed_objects; - state_node_id _id; - uint64_t _revision = 0; - mutable std::optional< crypto::multihash > _merkle_root; + state_node_id _id; + uint64_t _revision = 0; + mutable std::optional< crypto::multihash > _merkle_root; - bool _finalized = false; + bool _finalized = false; - std::timed_mutex _cv_mutex; - std::condition_variable_any _cv; + std::timed_mutex _cv_mutex; + std::condition_variable_any _cv; - public: - state_delta() = default; - state_delta( const std::optional< std::filesystem::path >& p ); - ~state_delta() = default; +public: + state_delta() = default; + state_delta( const std::optional< std::filesystem::path >& p ); + ~state_delta() = default; - void put( const key_type& k, const value_type& v ); - void erase( const key_type& k ); - const value_type* find( const key_type& key ) const; + void put( const key_type& k, const value_type& v ); + void erase( const key_type& k ); + const value_type* find( const key_type& key ) const; - void squash(); - void commit(); + void squash(); + void commit(); - void clear(); + void clear(); - bool is_modified( const key_type& k ) const; - bool is_removed( const key_type& k ) const; - bool is_root() const; - bool is_empty() const; + bool is_modified( const key_type& k ) const; + bool is_removed( const key_type& k ) const; + bool is_root() const; + bool is_empty() const; - uint64_t revision() const; - void set_revision( uint64_t revision ); + uint64_t revision() const; + void set_revision( uint64_t revision ); - bool is_finalized() const; - void finalize(); + bool is_finalized() const; + void finalize(); - std::condition_variable_any& cv(); - std::timed_mutex& cv_mutex(); + std::condition_variable_any& cv(); + std::timed_mutex& cv_mutex(); - crypto::multihash merkle_root() const; - std::vector< protocol::state_delta_entry > get_delta_entries() const; + crypto::multihash merkle_root() const; + std::vector< protocol::state_delta_entry > get_delta_entries() const; - const state_node_id& id() const; - const state_node_id& parent_id() const; - std::shared_ptr< state_delta > parent() const; - const protocol::block_header& block_header() const; + const state_node_id& id() const; + const state_node_id& parent_id() const; + std::shared_ptr< state_delta > parent() const; + const protocol::block_header& block_header() const; - std::shared_ptr< state_delta > make_child( const state_node_id& id = state_node_id(), const protocol::block_header& header = protocol::block_header() ); - std::shared_ptr< state_delta > clone( const state_node_id& id, const protocol::block_header& header ); - - const std::shared_ptr< backend_type > backend() const; + std::shared_ptr< state_delta > make_child( const state_node_id& id = state_node_id(), + const protocol::block_header& header = protocol::block_header() ); + std::shared_ptr< state_delta > clone( const state_node_id& id, const protocol::block_header& header ); - private: - void commit_helper(); + const std::shared_ptr< backend_type > backend() const; 
- std::shared_ptr< state_delta > get_root(); - }; +private: + void commit_helper(); -} // koinos::state_db::detail + std::shared_ptr< state_delta > get_root(); +}; + +} // namespace koinos::state_db::detail diff --git a/tests/main.cpp b/tests/main.cpp index 339b70a..614368d 100644 --- a/tests/main.cpp +++ b/tests/main.cpp @@ -1,3 +1,3 @@ #define BOOST_TEST_MODULE koinos_state_db_tests -#include #include +#include diff --git a/tests/state_db_test.cpp b/tests/state_db_test.cpp index 75a095c..cd2f53a 100644 --- a/tests/state_db_test.cpp +++ b/tests/state_db_test.cpp @@ -3,20 +3,20 @@ #include #include #include -#include #include +#include #include #include #include #include #include #include -#include #include +#include #include -#include #include +#include using namespace koinos; using namespace koinos::state_db; @@ -26,129 +26,140 @@ using namespace std::string_literals; struct test_block { - std::string previous; - uint64_t height = 0; - uint64_t nonce = 0; + std::string previous; + uint64_t height = 0; + uint64_t nonce = 0; - crypto::multihash get_id() const; + crypto::multihash get_id() const; }; crypto::multihash test_block::get_id() const { - return crypto::hash( crypto::multicodec::sha2_256, util::converter::to< crypto::multihash >( previous ), height, nonce ); + return crypto::hash( crypto::multicodec::sha2_256, + util::converter::to< crypto::multihash >( previous ), + height, + nonce ); } struct state_db_fixture { - state_db_fixture() - { - initialize_logging( "koinos_test", {}, "info" ); + state_db_fixture() + { + initialize_logging( "koinos_test", {}, "info" ); - temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); - std::filesystem::create_directory( temp ); + temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); + std::filesystem::create_directory( temp ); - db.open( temp, [&]( state_db::state_node_ptr root ){}, fork_resolution_algorithm::fifo, db.get_unique_lock() ); - } + db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, fork_resolution_algorithm::fifo, db.get_unique_lock() ); + } - ~state_db_fixture() - { - boost::log::core::get()->remove_all_sinks(); - db.close( db.get_unique_lock() ); - std::filesystem::remove_all( temp ); - } + ~state_db_fixture() + { + boost::log::core::get()->remove_all_sinks(); + db.close( db.get_unique_lock() ); + std::filesystem::remove_all( temp ); + } - database db; - std::filesystem::path temp; + database db; + std::filesystem::path temp; }; BOOST_FIXTURE_TEST_SUITE( state_db_tests, state_db_fixture ) BOOST_AUTO_TEST_CASE( basic_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object" ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - auto shared_db_lock = db.get_shared_lock(); +{ + try + { + BOOST_TEST_MESSAGE( "Creating object" ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); + auto shared_db_lock = db.get_shared_lock(); - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = 
util::converter::as< std::string >( db_key ).size(); - // Object should not exist on older state node - BOOST_CHECK_EQUAL( db.get_root( shared_db_lock )->get_object( space, a_key ), nullptr ); + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); + // Object should not exist on older state node + BOOST_CHECK_EQUAL( db.get_root( shared_db_lock )->get_object( space, a_key ), nullptr ); - BOOST_TEST_MESSAGE( "Modifying object" ); + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); - a_val = "alicia"; - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), 1 ); + BOOST_TEST_MESSAGE( "Modifying object" ); - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); + a_val = "alicia"; + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK( !state_2 ); + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); - db.finalize_node( state_1->id(), shared_db_lock ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK( !state_2 ); - BOOST_REQUIRE_THROW( state_1->put_object( space, a_key, &a_val ), node_finalized ); + db.finalize_node( state_1->id(), shared_db_lock ); - state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2 ); - a_val = "alex"; - BOOST_CHECK_EQUAL( state_2->put_object( space, a_key, &a_val ), -2 ); + BOOST_REQUIRE_THROW( state_1->put_object( space, a_key, &a_val ), node_finalized ); - ptr = state_2->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); + state_2 = db.create_writable_node( state_1->id(), state_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2 ); + a_val = "alex"; + BOOST_CHECK_EQUAL( state_2->put_object( space, a_key, &a_val ), -2 ); - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, "alicia" ); + ptr = state_2->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); - BOOST_TEST_MESSAGE( "Erasing object" ); - state_2->remove_object( space, a_key ); + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alicia" ); - BOOST_CHECK( !state_2->get_object( space, a_key ) ); + BOOST_TEST_MESSAGE( "Erasing object" ); + state_2->remove_object( space, a_key ); - db.discard_node( state_2->id(), shared_db_lock ); - state_2 = db.get_node( state_2->id(), shared_db_lock ); - BOOST_CHECK( !state_2 ); + BOOST_CHECK( !state_2->get_object( space, a_key ) ); - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, "alicia" ); + db.discard_node( state_2->id(), shared_db_lock ); + state_2 = db.get_node( state_2->id(), shared_db_lock 
); + BOOST_CHECK( !state_2 ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, "alicia" ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( fork_tests ) -{ try { - BOOST_TEST_MESSAGE( "Basic fork tests on state_db" ); - crypto::multihash id, prev_id, block_1000_id; - test_block b; +{ + try + { + BOOST_TEST_MESSAGE( "Basic fork tests on state_db" ); + crypto::multihash id, prev_id, block_1000_id; + test_block b; - auto shared_db_lock = db.get_shared_lock(); + auto shared_db_lock = db.get_shared_lock(); - prev_id = db.get_root( shared_db_lock )->id(); + prev_id = db.get_root( shared_db_lock )->id(); - for( uint64_t i = 1; i <= 2000; ++i ) - { + for( uint64_t i = 1; i <= 2'000; ++i ) + { b.previous = util::converter::as< std::string >( prev_id ); - b.height = i; - id = b.get_id(); + b.height = i; + id = b.get_id(); auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); BOOST_CHECK_EQUAL( b.height, new_block->revision() ); @@ -156,88 +167,95 @@ BOOST_AUTO_TEST_CASE( fork_tests ) prev_id = id; - if( i == 1000 ) block_1000_id = id; - } + if( i == 1'000 ) + block_1000_id = id; + } - BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); - BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 0 ); + BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 0 ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2000 ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2'000 ); - BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->id() == block_1000_id ); - BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->revision() == 1000 ); + BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->id() == block_1000_id ); + BOOST_REQUIRE( db.get_node( block_1000_id, shared_db_lock )->revision() == 1'000 ); - auto fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() ); - fork_heads.clear(); + auto fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() ); + fork_heads.clear(); - BOOST_TEST_MESSAGE( "Test commit" ); - shared_db_lock.reset(); - db.commit_node( block_1000_id, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == block_1000_id ); - BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 1000 ); + BOOST_TEST_MESSAGE( "Test commit" ); + shared_db_lock.reset(); + db.commit_node( block_1000_id, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + BOOST_REQUIRE( db.get_root( shared_db_lock )->id() == block_1000_id ); + BOOST_REQUIRE( db.get_root( shared_db_lock )->revision() == 1'000 ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( 
fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() ); - crypto::multihash block_2000_id = id; + crypto::multihash block_2000_id = id; - BOOST_TEST_MESSAGE( "Test discard" ); - b.previous = util::converter::as< std::string >( db.get_head( shared_db_lock )->id() ); - b.height = db.get_head( shared_db_lock )->revision() + 1; - id = b.get_id(); - db.create_writable_node( util::converter::to< crypto::multihash >( b.previous ), id, protocol::block_header(), shared_db_lock ); - auto new_block = db.get_node( id, shared_db_lock ); - BOOST_REQUIRE( new_block ); + BOOST_TEST_MESSAGE( "Test discard" ); + b.previous = util::converter::as< std::string >( db.get_head( shared_db_lock )->id() ); + b.height = db.get_head( shared_db_lock )->revision() + 1; + id = b.get_id(); + db.create_writable_node( util::converter::to< crypto::multihash >( b.previous ), + id, + protocol::block_header(), + shared_db_lock ); + auto new_block = db.get_node( id, shared_db_lock ); + BOOST_REQUIRE( new_block ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == prev_id ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == prev_id ); - db.discard_node( id, shared_db_lock ); + db.discard_node( id, shared_db_lock ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); - BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2000 ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->id() == prev_id ); + BOOST_REQUIRE( db.get_head( shared_db_lock )->revision() == 2'000 ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == prev_id ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == prev_id ); - // Shared ptr should still exist, but not be returned with get_node - BOOST_REQUIRE( new_block ); - BOOST_REQUIRE( !db.get_node( id, shared_db_lock ) ); - new_block.reset(); + // Shared ptr should still exist, but not be returned with get_node + BOOST_REQUIRE( new_block ); + BOOST_REQUIRE( !db.get_node( id, shared_db_lock ) ); + new_block.reset(); - // Cannot discard head - BOOST_REQUIRE_THROW( db.discard_node( prev_id, shared_db_lock ), cannot_discard ); + // Cannot discard head + BOOST_REQUIRE_THROW( db.discard_node( prev_id, shared_db_lock ), cannot_discard ); - BOOST_TEST_MESSAGE( "Check duplicate node creation" ); - BOOST_REQUIRE( !db.create_writable_node( db.get_head( shared_db_lock )->parent_id(), db.get_head( shared_db_lock )->id(), protocol::block_header(), shared_db_lock ) ); + BOOST_TEST_MESSAGE( "Check duplicate node creation" ); + BOOST_REQUIRE( !db.create_writable_node( db.get_head( shared_db_lock )->parent_id(), + db.get_head( shared_db_lock )->id(), + protocol::block_header(), + shared_db_lock ) ); - BOOST_TEST_MESSAGE( "Check failed linking" ); - crypto::multihash zero = crypto::multihash::zero( crypto::multicodec::sha2_256 ); - BOOST_REQUIRE( !db.create_writable_node( zero, id, protocol::block_header(), shared_db_lock ) ); + BOOST_TEST_MESSAGE( "Check failed linking" ); + crypto::multihash zero = crypto::multihash::zero( crypto::multicodec::sha2_256 ); + BOOST_REQUIRE( !db.create_writable_node( zero, id, protocol::block_header(), shared_db_lock ) ); - crypto::multihash head_id = db.get_head( shared_db_lock )->id(); - uint64_t head_rev = db.get_head( 
shared_db_lock )->revision(); + crypto::multihash head_id = db.get_head( shared_db_lock )->id(); + uint64_t head_rev = db.get_head( shared_db_lock )->revision(); - BOOST_TEST_MESSAGE( "Test minority fork" ); - auto fork_node = db.get_node_at_revision( 1995, shared_db_lock ); - prev_id = fork_node->id(); - b.nonce = 1; + BOOST_TEST_MESSAGE( "Test minority fork" ); + auto fork_node = db.get_node_at_revision( 1'995, shared_db_lock ); + prev_id = fork_node->id(); + b.nonce = 1; - auto old_block_1996_id = db.get_node_at_revision( 1996, shared_db_lock )->id(); - auto old_block_1997_id = db.get_node_at_revision( 1997, shared_db_lock )->id(); + auto old_block_1996_id = db.get_node_at_revision( 1'996, shared_db_lock )->id(); + auto old_block_1997_id = db.get_node_at_revision( 1'997, shared_db_lock )->id(); - for ( uint64_t i = 1; i <= 5; ++i ) - { + for( uint64_t i = 1; i <= 5; ++i ) + { b.previous = util::converter::as< std::string >( prev_id ); - b.height = fork_node->revision() + i; - id = b.get_id(); + b.height = fork_node->revision() + i; + id = b.get_id(); auto new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); BOOST_CHECK_EQUAL( b.height, new_block->revision() ); @@ -247,65 +265,68 @@ BOOST_AUTO_TEST_CASE( fork_tests ) BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); prev_id = id; - } - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == db.get_head( shared_db_lock )->id() && fork_heads[1]->id() == id ) || - ( fork_heads[1]->id() == db.get_head( shared_db_lock )->id() && fork_heads[0]->id() == id ) ); - auto old_head_id = db.get_head( shared_db_lock )->id(); - - b.previous = util::converter::as< std::string >( prev_id ); - b.height = head_rev + 1; - id = b.get_id(); - - // When this node finalizes, it will be the longest path and should become head - new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( b.height, new_block->revision() ); - - BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); - - db.finalize_node( id, shared_db_lock ); - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == id && fork_heads[1]->id() == old_head_id ) || - ( fork_heads[1]->id() == id && fork_heads[0]->id() == old_head_id ) ); - - BOOST_CHECK( db.get_head( shared_db_lock )->id() == id ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == b.height ); - - db.discard_node( old_block_1997_id, shared_db_lock ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); - BOOST_REQUIRE( ( fork_heads[0]->id() == id && fork_heads[1]->id() == old_block_1996_id ) || - ( fork_heads[1]->id() == id && fork_heads[0]->id() == old_block_1996_id ) ); - - db.discard_node( old_block_1996_id, shared_db_lock ); - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); - BOOST_REQUIRE( fork_heads[0]->id() == id ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + } + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == db.get_head( shared_db_lock )->id() && fork_heads[ 1 ]->id() == id ) + || ( fork_heads[ 1 ]->id() == db.get_head( shared_db_lock )->id() && fork_heads[ 0 ]->id() == id ) 
); + auto old_head_id = db.get_head( shared_db_lock )->id(); + + b.previous = util::converter::as< std::string >( prev_id ); + b.height = head_rev + 1; + id = b.get_id(); + + // When this node finalizes, it will be the longest path and should become head + new_block = db.create_writable_node( prev_id, id, protocol::block_header(), shared_db_lock ); + BOOST_CHECK_EQUAL( b.height, new_block->revision() ); + + BOOST_CHECK( db.get_head( shared_db_lock )->id() == head_id ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == head_rev ); + + db.finalize_node( id, shared_db_lock ); + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == id && fork_heads[ 1 ]->id() == old_head_id ) + || ( fork_heads[ 1 ]->id() == id && fork_heads[ 0 ]->id() == old_head_id ) ); + + BOOST_CHECK( db.get_head( shared_db_lock )->id() == id ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == b.height ); + + db.discard_node( old_block_1997_id, shared_db_lock ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 2 ); + BOOST_REQUIRE( ( fork_heads[ 0 ]->id() == id && fork_heads[ 1 ]->id() == old_block_1996_id ) + || ( fork_heads[ 1 ]->id() == id && fork_heads[ 0 ]->id() == old_block_1996_id ) ); + + db.discard_node( old_block_1996_id, shared_db_lock ); + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE_EQUAL( fork_heads.size(), 1 ); + BOOST_REQUIRE( fork_heads[ 0 ]->id() == id ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( merge_iterator ) -{ try { - std::filesystem::path temp = std::filesystem::temp_directory_path() / koinos::util::random_alphanumeric( 8 ); - std::filesystem::create_directory( temp ); - - using state_delta_ptr = std::shared_ptr< state_delta >; - std::deque< state_delta_ptr > delta_queue; - delta_queue.emplace_back( std::make_shared< state_delta >( temp ) ); - - // alice: 1 - // bob: 2 - // charlie: 3 - delta_queue.back()->put( "alice", "1" ); - delta_queue.back()->put( "bob", "2" ); - delta_queue.back()->put( "charlie", "3" ); - - { +{ + try + { + std::filesystem::path temp = std::filesystem::temp_directory_path() / koinos::util::random_alphanumeric( 8 ); + std::filesystem::create_directory( temp ); + + using state_delta_ptr = std::shared_ptr< state_delta >; + std::deque< state_delta_ptr > delta_queue; + delta_queue.emplace_back( std::make_shared< state_delta >( temp ) ); + + // alice: 1 + // bob: 2 + // charlie: 3 + delta_queue.back()->put( "alice", "1" ); + delta_queue.back()->put( "bob", "2" ); + delta_queue.back()->put( "charlie", "3" ); + + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -332,17 +353,16 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); BOOST_CHECK_EQUAL( *itr, "1" ); - } - + } - // alice: 4 - // bob: 5 - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "alice", "4" ); - delta_queue.back()->put( "bob", "5" ); + // alice: 4 + // bob: 5 + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "alice", "4" ); + delta_queue.back()->put( "bob", "5" ); - { + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -369,15 +389,15 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); 
BOOST_CHECK_EQUAL( *itr, "4" ); - } + } - // alice: 4 (not changed) - // bob: 6 - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "bob", "6" ); + // alice: 4 (not changed) + // bob: 6 + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "bob", "6" ); - { + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -404,15 +424,15 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); BOOST_CHECK_EQUAL( *itr, "4" ); - } + } - // alice: (removed) - // bob: 6 (not changed) - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->erase( "alice" ); + // alice: (removed) + // bob: 6 (not changed) + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->erase( "alice" ); - { + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -433,15 +453,15 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "bob" ); BOOST_CHECK_EQUAL( *itr, "6" ); - } + } - // alice: 4 (restored) - // bob: 6 (not changed) - // charlie: 3 (not changed) - delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); - delta_queue.back()->put( "alice", "4" ); + // alice: 4 (restored) + // bob: 6 (not changed) + // charlie: 3 (not changed) + delta_queue.emplace_back( delta_queue.back()->make_child( delta_queue.back()->id() ) ); + delta_queue.back()->put( "alice", "4" ); - { + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -468,13 +488,13 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); BOOST_CHECK_EQUAL( *itr, "4" ); - } + } - delta_queue.pop_front(); - delta_queue.pop_front(); - delta_queue.front()->commit(); + delta_queue.pop_front(); + delta_queue.pop_front(); + delta_queue.front()->commit(); - { + { merge_state m_state( delta_queue.back() ); auto itr = m_state.begin(); @@ -501,10 +521,10 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); BOOST_CHECK_EQUAL( *itr, "4" ); - } + } - while( delta_queue.size() > 1 ) - { + while( delta_queue.size() > 1 ) + { delta_queue.pop_front(); delta_queue.front()->commit(); @@ -534,128 +554,154 @@ BOOST_AUTO_TEST_CASE( merge_iterator ) --itr; BOOST_CHECK_EQUAL( itr.key(), "alice" ); BOOST_CHECK_EQUAL( *itr, "4" ); - } -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( reset_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object on transient state node" ); - - auto shared_db_lock = db.get_shared_lock(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - db.finalize_node( state_1->id(), shared_db_lock ); - - auto val_ptr = db.get_head( shared_db_lock 
)->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - - BOOST_TEST_MESSAGE( "Closing and opening database" ); - shared_db_lock.reset(); - state_1.reset(); - db.close( db.get_unique_lock() ); - - BOOST_CHECK_THROW( db.reset( db.get_unique_lock() ), koinos::exception ); - - shared_db_lock = db.get_shared_lock(); - BOOST_CHECK_THROW( db.get_node_at_revision( 1, shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_node_at_revision( 1, crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.create_writable_node( crypto::multihash::zero( crypto::multicodec::sha2_256 ), crypto::hash( crypto::multicodec::sha2_256, 1 ), protocol::block_header(), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.finalize_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.discard_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_head( shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_fork_heads( shared_db_lock ), koinos::exception ); - BOOST_CHECK_THROW( db.get_root( shared_db_lock ), koinos::exception ); - shared_db_lock.reset(); - - BOOST_CHECK_THROW( db.commit_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), db.get_unique_lock() ), koinos::exception ); - - db.open( temp, []( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - - // Object should not exist on persistent database (state node was not committed) - BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); - - BOOST_TEST_MESSAGE( "Creating object on committed state node" ); - - state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - db.finalize_node( state_1->id(), shared_db_lock ); - auto state_1_id = state_1->id(); - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_1_id, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); - - BOOST_TEST_MESSAGE( "Closing and opening database" ); - shared_db_lock.reset(); - state_1.reset(); - db.close( db.get_unique_lock() ); - db.open( temp, []( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - // State node was committed and should exist on open - shared_db_lock = db.get_shared_lock(); - val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( val_ptr ); - BOOST_CHECK_EQUAL( *val_ptr, a_val ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 1 ); - - BOOST_TEST_MESSAGE( "Resetting database" ); - shared_db_lock.reset(); - 
db.reset( db.get_unique_lock() ); - - // Object should not exist on reset db - shared_db_lock = db.get_shared_lock(); - BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); - BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + BOOST_TEST_MESSAGE( "Creating object on transient state node" ); + + auto shared_db_lock = db.get_shared_lock(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + db.finalize_node( state_1->id(), shared_db_lock ); + + auto val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + + BOOST_TEST_MESSAGE( "Closing and opening database" ); + shared_db_lock.reset(); + state_1.reset(); + db.close( db.get_unique_lock() ); + + BOOST_CHECK_THROW( db.reset( db.get_unique_lock() ), koinos::exception ); + + shared_db_lock = db.get_shared_lock(); + BOOST_CHECK_THROW( db.get_node_at_revision( 1, shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_node_at_revision( 1, crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.get_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.create_writable_node( crypto::multihash::zero( crypto::multicodec::sha2_256 ), + crypto::hash( crypto::multicodec::sha2_256, 1 ), + protocol::block_header(), + shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.finalize_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.discard_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), shared_db_lock ), + koinos::exception ); + BOOST_CHECK_THROW( db.get_head( shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_fork_heads( shared_db_lock ), koinos::exception ); + BOOST_CHECK_THROW( db.get_root( shared_db_lock ), koinos::exception ); + shared_db_lock.reset(); + + BOOST_CHECK_THROW( db.commit_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), db.get_unique_lock() ), + koinos::exception ); + + db.open( temp, []( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + + // Object should not exist on persistent database (state node was not committed) + BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); + + BOOST_TEST_MESSAGE( "Creating object on committed state node" ); + + state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), 
a_val.size() + key_size ); + db.finalize_node( state_1->id(), shared_db_lock ); + auto state_1_id = state_1->id(); + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_1_id, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); + + BOOST_TEST_MESSAGE( "Closing and opening database" ); + shared_db_lock.reset(); + state_1.reset(); + db.close( db.get_unique_lock() ); + db.open( temp, []( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + + // State node was committed and should exist on open + shared_db_lock = db.get_shared_lock(); + val_ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( val_ptr ); + BOOST_CHECK_EQUAL( *val_ptr, a_val ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::hash( crypto::multicodec::sha2_256, 1 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 1 ); + + BOOST_TEST_MESSAGE( "Resetting database" ); + shared_db_lock.reset(); + db.reset( db.get_unique_lock() ); + + // Object should not exist on reset db + shared_db_lock = db.get_shared_lock(); + BOOST_CHECK( !db.get_head( shared_db_lock )->get_object( space, a_key ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == crypto::multihash::zero( crypto::multicodec::sha2_256 ) ); + BOOST_CHECK( db.get_head( shared_db_lock )->revision() == 0 ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( anonymous_node_test ) -{ try { - BOOST_TEST_MESSAGE( "Creating object" ); - object_space space; +{ + try + { + BOOST_TEST_MESSAGE( "Creating object" ); + object_space space; - auto shared_db_lock = db.get_shared_lock(); + auto shared_db_lock = db.get_shared_lock(); - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - std::string a_key = "a"; - std::string a_val = "alice"; + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + std::string a_key = "a"; + std::string a_val = "alice"; - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); - BOOST_CHECK( state_1->put_object( space, a_key, &a_val ) == a_val.size() + key_size ); + BOOST_CHECK( state_1->put_object( space, a_key, &a_val ) == a_val.size() + key_size ); - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); - { + { BOOST_TEST_MESSAGE( "Creating anonymous state node" ); auto anon_state = state_1->create_anonymous_node(); @@ -677,9 +723,9 @@ BOOST_AUTO_TEST_CASE( anonymous_node_test ) BOOST_CHECK_EQUAL( *ptr, "alice" ); BOOST_TEST_MESSAGE( "Deleting anonymous node" ); - } + } - { + { BOOST_TEST_MESSAGE( "Creating anonymous state node" ); auto 
anon_state = state_1->create_anonymous_node(); @@ -701,1042 +747,1125 @@ BOOST_AUTO_TEST_CASE( anonymous_node_test ) ptr = state_1->get_object( space, a_key ); BOOST_REQUIRE( ptr ); BOOST_CHECK_EQUAL( *ptr, a_val ); - } - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); + } -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( merkle_root_test ) -{ try { - auto shared_db_lock = db.get_shared_lock(); - - auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_1_id, protocol::block_header(), shared_db_lock ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - std::string b_key = "b"; - std::string b_val = "bob"; - std::string c_key = "c"; - std::string c_val = "charlie"; - - state_1->put_object( space, c_key, &c_val ); - state_1->put_object( space, b_key, &b_val ); - state_1->put_object( space, a_key, &a_val ); - - chain::database_key a_db_key; - *a_db_key.mutable_space() = space; - a_db_key.set_key( a_key ); - - chain::database_key b_db_key; - *b_db_key.mutable_space() = space; - b_db_key.set_key( b_key ); - - chain::database_key c_db_key; - *c_db_key.mutable_space() = space; - c_db_key.set_key( c_key ); - - std::vector< std::string > merkle_leafs; - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); - merkle_leafs.push_back( a_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); - merkle_leafs.push_back( b_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( c_db_key ) ); - merkle_leafs.push_back( c_val ); - - BOOST_CHECK_THROW( state_1->merkle_root(), koinos::exception ); - db.finalize_node( state_1_id, shared_db_lock ); - - auto merkle_root = koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - BOOST_CHECK_EQUAL( merkle_root, state_1->merkle_root() ); - - auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); - - std::string d_key = "d"; - std::string d_val = "dave"; - a_val = "alicia"; - - state_2->put_object( space, a_key, &a_val ); - state_2->put_object( space, d_key, &d_val ); - state_2->remove_object( space, b_key ); - - chain::database_key d_db_key; - *d_db_key.mutable_space() = space; - d_db_key.set_key( d_key ); - - merkle_leafs.clear(); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); - merkle_leafs.push_back( a_val ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); - merkle_leafs.push_back( "" ); - merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( d_db_key ) ); - merkle_leafs.push_back( d_val ); - - db.finalize_node( state_2_id, shared_db_lock ); - merkle_root = koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); - BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - db.commit_node( state_2_id, db.get_unique_lock() ); - state_2 = db.get_node( state_2_id, db.get_shared_lock() ); - BOOST_CHECK_EQUAL( merkle_root, 
state_2->merkle_root() ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + auto shared_db_lock = db.get_shared_lock(); + + auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1_id, + protocol::block_header(), + shared_db_lock ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "b"; + std::string b_val = "bob"; + std::string c_key = "c"; + std::string c_val = "charlie"; + + state_1->put_object( space, c_key, &c_val ); + state_1->put_object( space, b_key, &b_val ); + state_1->put_object( space, a_key, &a_val ); + + chain::database_key a_db_key; + *a_db_key.mutable_space() = space; + a_db_key.set_key( a_key ); + + chain::database_key b_db_key; + *b_db_key.mutable_space() = space; + b_db_key.set_key( b_key ); + + chain::database_key c_db_key; + *c_db_key.mutable_space() = space; + c_db_key.set_key( c_key ); + + std::vector< std::string > merkle_leafs; + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); + merkle_leafs.push_back( a_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); + merkle_leafs.push_back( b_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( c_db_key ) ); + merkle_leafs.push_back( c_val ); + + BOOST_CHECK_THROW( state_1->merkle_root(), koinos::exception ); + db.finalize_node( state_1_id, shared_db_lock ); + + auto merkle_root = + koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + BOOST_CHECK_EQUAL( merkle_root, state_1->merkle_root() ); + + auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); + + std::string d_key = "d"; + std::string d_val = "dave"; + a_val = "alicia"; + + state_2->put_object( space, a_key, &a_val ); + state_2->put_object( space, d_key, &d_val ); + state_2->remove_object( space, b_key ); + + chain::database_key d_db_key; + *d_db_key.mutable_space() = space; + d_db_key.set_key( d_key ); + + merkle_leafs.clear(); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( a_db_key ) ); + merkle_leafs.push_back( a_val ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( b_db_key ) ); + merkle_leafs.push_back( "" ); + merkle_leafs.emplace_back( koinos::util::converter::as< std::string >( d_db_key ) ); + merkle_leafs.push_back( d_val ); + + db.finalize_node( state_2_id, shared_db_lock ); + merkle_root = + koinos::crypto::merkle_tree< std::string >( koinos::crypto::multicodec::sha2_256, merkle_leafs ).root()->hash(); + BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + db.commit_node( state_2_id, db.get_unique_lock() ); + state_2 = db.get_node( state_2_id, db.get_shared_lock() ); + BOOST_CHECK_EQUAL( merkle_root, state_2->merkle_root() ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( get_delta_entries_test ) -{ try { - auto shared_db_lock = db.get_shared_lock(); - - auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_1_id, protocol::block_header(), shared_db_lock ); +{ + try + { + auto shared_db_lock = db.get_shared_lock(); - object_space space; - std::string a_key = "a"; - std::string 
a_val = "alice"; - std::string b_key = "b"; - std::string b_val = "bob"; - std::string c_key = "c"; - std::string c_val = "charlie"; + auto state_1_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1_id, + protocol::block_header(), + shared_db_lock ); - state_1->put_object( space, c_key, &c_val ); - state_1->put_object( space, b_key, &b_val ); - state_1->put_object( space, a_key, &a_val ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "b"; + std::string b_val = "bob"; + std::string c_key = "c"; + std::string c_val = "charlie"; - chain::database_key a_db_key; - *a_db_key.mutable_space() = space; - a_db_key.set_key( a_key ); + state_1->put_object( space, c_key, &c_val ); + state_1->put_object( space, b_key, &b_val ); + state_1->put_object( space, a_key, &a_val ); - chain::database_key b_db_key; - *b_db_key.mutable_space() = space; - b_db_key.set_key( b_key ); + chain::database_key a_db_key; + *a_db_key.mutable_space() = space; + a_db_key.set_key( a_key ); - chain::database_key c_db_key; - *c_db_key.mutable_space() = space; - c_db_key.set_key( c_key ); + chain::database_key b_db_key; + *b_db_key.mutable_space() = space; + b_db_key.set_key( b_key ); - auto entries = state_1->get_delta_entries(); + chain::database_key c_db_key; + *c_db_key.mutable_space() = space; + c_db_key.set_key( c_key ); - BOOST_CHECK_EQUAL( 3, entries.size() ); + auto entries = state_1->get_delta_entries(); - BOOST_CHECK_EQUAL( a_key, entries[0].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[0].object_space().DebugString() ); - BOOST_CHECK_EQUAL( a_val, entries[0].value() ); + BOOST_CHECK_EQUAL( 3, entries.size() ); - BOOST_CHECK_EQUAL( b_key, entries[1].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[1].object_space().DebugString() ); - BOOST_CHECK_EQUAL( b_val, entries[1].value() ); + BOOST_CHECK_EQUAL( a_key, entries[ 0 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 0 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( a_val, entries[ 0 ].value() ); - BOOST_CHECK_EQUAL( c_key, entries[2].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries[2].object_space().DebugString() ); - BOOST_CHECK_EQUAL( c_val, entries[2].value() ); + BOOST_CHECK_EQUAL( b_key, entries[ 1 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 1 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( b_val, entries[ 1 ].value() ); - db.finalize_node( state_1_id, shared_db_lock ); + BOOST_CHECK_EQUAL( c_key, entries[ 2 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries[ 2 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( c_val, entries[ 2 ].value() ); - auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); + db.finalize_node( state_1_id, shared_db_lock ); - std::string d_key = "d"; - std::string d_val = "dave"; - a_val = "alicia"; + auto state_2_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( state_1_id, state_2_id, protocol::block_header(), shared_db_lock ); - state_2->put_object( space, a_key, &a_val ); - state_2->put_object( space, d_key, &d_val ); - state_2->remove_object( space, b_key ); + std::string d_key = "d"; + std::string d_val = "dave"; + a_val = "alicia"; - chain::database_key d_db_key; - *d_db_key.mutable_space() = space; - d_db_key.set_key( d_key ); + 
state_2->put_object( space, a_key, &a_val ); + state_2->put_object( space, d_key, &d_val ); + state_2->remove_object( space, b_key ); - auto entries2 = state_2->get_delta_entries(); - BOOST_CHECK_EQUAL( 3, entries.size() ); + chain::database_key d_db_key; + *d_db_key.mutable_space() = space; + d_db_key.set_key( d_key ); - BOOST_CHECK_EQUAL( a_key, entries2[0].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries2[0].object_space().DebugString() ); - BOOST_CHECK_EQUAL( a_val, entries2[0].value() ); + auto entries2 = state_2->get_delta_entries(); + BOOST_CHECK_EQUAL( 3, entries.size() ); - BOOST_CHECK_EQUAL( b_key, entries2[1].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries2[1].object_space().DebugString() ); - BOOST_CHECK_EQUAL( false, entries2[1].has_value() ); // Deleted value + BOOST_CHECK_EQUAL( a_key, entries2[ 0 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 0 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( a_val, entries2[ 0 ].value() ); - BOOST_CHECK_EQUAL( d_key, entries2[2].key() ); - BOOST_CHECK_EQUAL( space.DebugString(), entries2[2].object_space().DebugString() ); - BOOST_CHECK_EQUAL( d_val, entries2[2].value() ); + BOOST_CHECK_EQUAL( b_key, entries2[ 1 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 1 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( false, entries2[ 1 ].has_value() ); // Deleted value -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + BOOST_CHECK_EQUAL( d_key, entries2[ 2 ].key() ); + BOOST_CHECK_EQUAL( space.DebugString(), entries2[ 2 ].object_space().DebugString() ); + BOOST_CHECK_EQUAL( d_val, entries2[ 2 ].value() ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( rocksdb_backend_test ) -{ try { - koinos::state_db::backends::rocksdb::rocksdb_backend backend; - auto temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); - - BOOST_REQUIRE_THROW( backend.open( temp ), koinos::exception ); +{ + try + { + koinos::state_db::backends::rocksdb::rocksdb_backend backend; + auto temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); - BOOST_CHECK_THROW( backend.begin(), koinos::exception ); - BOOST_CHECK_THROW( backend.end(), koinos::exception ); - BOOST_CHECK_THROW( backend.put( "foo", "bar" ), koinos::exception ); - BOOST_CHECK_THROW( backend.get( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.erase( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.clear(), koinos::exception ); - BOOST_CHECK_THROW( backend.size(), koinos::exception ); - BOOST_CHECK_THROW( backend.empty(), koinos::exception ); - BOOST_CHECK_THROW( backend.find( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.lower_bound( "foo" ), koinos::exception ); - BOOST_CHECK_THROW( backend.flush(), koinos::exception ); - BOOST_CHECK( backend.revision() == 0 ); - BOOST_CHECK( backend.id() == koinos::crypto::multihash::zero( koinos::crypto::multicodec::sha2_256 ) ); + BOOST_REQUIRE_THROW( backend.open( temp ), koinos::exception ); - std::filesystem::create_directory( temp ); - backend.open( temp ); + BOOST_CHECK_THROW( backend.begin(), koinos::exception ); + BOOST_CHECK_THROW( backend.end(), koinos::exception ); + BOOST_CHECK_THROW( backend.put( "foo", "bar" ), koinos::exception ); + BOOST_CHECK_THROW( backend.get( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.erase( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.clear(), koinos::exception ); + BOOST_CHECK_THROW( backend.size(), koinos::exception ); + BOOST_CHECK_THROW( backend.empty(), 
koinos::exception ); + BOOST_CHECK_THROW( backend.find( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.lower_bound( "foo" ), koinos::exception ); + BOOST_CHECK_THROW( backend.flush(), koinos::exception ); + BOOST_CHECK( backend.revision() == 0 ); + BOOST_CHECK( backend.id() == koinos::crypto::multihash::zero( koinos::crypto::multicodec::sha2_256 ) ); - auto itr = backend.begin(); - BOOST_CHECK( itr == backend.end() ); + std::filesystem::create_directory( temp ); + backend.open( temp ); - backend.put( "foo", "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + auto itr = backend.begin(); + BOOST_CHECK( itr == backend.end() ); - backend.put( "alice", "bob" ); + backend.put( "foo", "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.put( "alice", "bob" ); - ++itr; - BOOST_CHECK( *itr == "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - ++itr; - BOOST_CHECK( itr == backend.end() ); + ++itr; + BOOST_CHECK( *itr == "bar" ); - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + ++itr; + BOOST_CHECK( itr == backend.end() ); - itr = backend.lower_bound( "charlie" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - itr = backend.lower_bound( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + itr = backend.lower_bound( "charlie" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - backend.put( "foo", "blob" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "blob" ); + itr = backend.lower_bound( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.put( "foo", "blob" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "blob" ); - backend.erase( "foo" ); + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.erase( "foo" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr == backend.end() ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - backend.erase( "foo" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr == backend.end() ); - backend.erase( "alice" ); - itr = backend.end(); - BOOST_CHECK( itr == backend.end() ); + backend.erase( "foo" ); - std::filesystem::remove_all( temp ); + backend.erase( "alice" ); + itr = backend.end(); + BOOST_CHECK( itr == backend.end() ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + std::filesystem::remove_all( temp ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( rocksdb_object_cache_test ) -{ try { - std::size_t cache_size = 1024; - koinos::state_db::backends::rocksdb::object_cache cache( cache_size ); - using value_type = koinos::state_db::backends::rocksdb::object_cache::value_type; - - std::string a_key = "a"; - std::string a_val = "alice"; - auto a_ptr = std::make_shared< const value_type >( a_val ); - - { - auto [cache_hit, val] = cache.get( a_key ); +{ + try + { + std::size_t cache_size = 1'024; + 
koinos::state_db::backends::rocksdb::object_cache cache( cache_size ); + using value_type = koinos::state_db::backends::rocksdb::object_cache::value_type; + + std::string a_key = "a"; + std::string a_val = "alice"; + auto a_ptr = std::make_shared< const value_type >( a_val ); + + { + auto [ cache_hit, val ] = cache.get( a_key ); BOOST_CHECK( !cache_hit ); BOOST_CHECK( !val ); - } + } - BOOST_CHECK( cache.put( a_key, a_ptr ) ); + BOOST_CHECK( cache.put( a_key, a_ptr ) ); - { + { auto [ cache_hit, val_ptr ] = cache.get( a_key ); BOOST_CHECK( cache_hit ); BOOST_REQUIRE( val_ptr ); BOOST_CHECK_EQUAL( *val_ptr, a_val ); - } + } - std::string b_key = "b"; - std::string b_val = "bob"; - auto b_ptr = std::make_shared< const value_type >( b_val ); + std::string b_key = "b"; + std::string b_val = "bob"; + auto b_ptr = std::make_shared< const value_type >( b_val ); - cache.put( b_key, b_ptr ); + cache.put( b_key, b_ptr ); - { + { auto [ cache_hit, val_ptr ] = cache.get( b_key ); BOOST_CHECK( cache_hit ); BOOST_REQUIRE( val_ptr ); BOOST_CHECK_EQUAL( *val_ptr, b_val ); - } + } - // Will put 'a' first in the cache to evict 'b' - cache.get( a_key ); + // Will put 'a' first in the cache to evict 'b' + cache.get( a_key ); - std::string fill_key = "f"; - std::string fill_val( cache_size - a_val.size() - b_val.size() + 1, 'f' ); - auto fill_ptr = std::make_shared< const value_type >( fill_val ); - BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); + std::string fill_key = "f"; + std::string fill_val( cache_size - a_val.size() - b_val.size() + 1, 'f' ); + auto fill_ptr = std::make_shared< const value_type >( fill_val ); + BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); - { + { auto [ cache_hit, val_ptr ] = cache.get( b_key ); BOOST_CHECK( !cache_hit ); BOOST_CHECK( !val_ptr ); - } + } - { + { auto [ cache_hit, val_ptr ] = cache.get( a_key ); BOOST_CHECK( cache_hit ); BOOST_REQUIRE( val_ptr ); BOOST_CHECK_EQUAL( *val_ptr, a_val ); - } + } - BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); - { + BOOST_CHECK( cache.put( fill_key, fill_ptr ) ); + { auto [ cache_hit, val_ptr ] = cache.get( b_key ); BOOST_CHECK( !cache_hit ); BOOST_CHECK( !val_ptr ); - } + } - std::string null_key = "n"; - std::shared_ptr< const value_type > null_ptr; - BOOST_CHECK( !cache.put( null_key, null_ptr ) ); + std::string null_key = "n"; + std::shared_ptr< const value_type > null_ptr; + BOOST_CHECK( !cache.put( null_key, null_ptr ) ); - { + { auto [ cache_hit, val_ptr ] = cache.get( null_key ); BOOST_CHECK( cache_hit ); BOOST_REQUIRE( !val_ptr ); - } - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( map_backend_test ) -{ try { - koinos::state_db::backends::map::map_backend backend; - - auto itr = backend.begin(); - BOOST_CHECK( itr == backend.end() ); +{ + try + { + koinos::state_db::backends::map::map_backend backend; - backend.put( "foo", "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + auto itr = backend.begin(); + BOOST_CHECK( itr == backend.end() ); - backend.put( "alice", "bob" ); + backend.put( "foo", "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.put( "alice", "bob" ); - ++itr; - BOOST_CHECK( *itr == "bar" ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - ++itr; - BOOST_CHECK( itr == backend.end() ); + 
++itr; + BOOST_CHECK( *itr == "bar" ); - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + ++itr; + BOOST_CHECK( itr == backend.end() ); - itr = backend.lower_bound( "charlie" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - itr = backend.lower_bound( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bar" ); + itr = backend.lower_bound( "charlie" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - backend.put( "foo", "blob" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "blob" ); + itr = backend.lower_bound( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bar" ); - --itr; - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.put( "foo", "blob" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "blob" ); - backend.erase( "foo" ); + --itr; + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - itr = backend.begin(); - BOOST_CHECK( itr != backend.end() ); - BOOST_CHECK( *itr == "bob" ); + backend.erase( "foo" ); - itr = backend.find( "foo" ); - BOOST_CHECK( itr == backend.end() ); + itr = backend.begin(); + BOOST_CHECK( itr != backend.end() ); + BOOST_CHECK( *itr == "bob" ); - backend.erase( "foo" ); + itr = backend.find( "foo" ); + BOOST_CHECK( itr == backend.end() ); - backend.erase( "alice" ); - itr = backend.end(); - BOOST_CHECK( itr == backend.end() ); + backend.erase( "foo" ); - backend.put( "foo", "bar" ); - BOOST_REQUIRE( backend.get( "foo" ) ); - BOOST_CHECK_EQUAL( *backend.get( "foo" ), "bar" ); + backend.erase( "alice" ); + itr = backend.end(); + BOOST_CHECK( itr == backend.end() ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + backend.put( "foo", "bar" ); + BOOST_REQUIRE( backend.get( "foo" ) ); + BOOST_CHECK_EQUAL( *backend.get( "foo" ), "bar" ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( fork_resolution ) -{ try { - /** - * The final fork graph looks like the following: - * - * / state_1 (100) --- state_4 (110) - * / \ - * genesis --- state_2 (99) \ state_5 (110) - * \ - * \ state_3 (101) - */ - - BOOST_TEST_MESSAGE( "Test default FIFO fork resolution" ); - - auto shared_db_lock = db.get_shared_lock(); - auto genesis_id = db.get_head( shared_db_lock )->id(); - - protocol::block_header header; - header.set_timestamp( 100 ); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - auto state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 101 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - auto state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( 
state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 110 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - auto state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - auto state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - BOOST_TEST_MESSAGE( "Test block time fork resolution" ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::block_time_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - header.set_timestamp( 100 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock)->id() == state_2->id() ); - - header.set_timestamp( 110 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - BOOST_TEST_MESSAGE( "Test pob fork resolution" ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( 
state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - std::string signer1 = "signer1"; - std::string signer2 = "signer2"; - std::string signer3 = "signer3"; - std::string signer4 = "signer4"; - std::string signer5 = "signer5"; - - // BEGIN: Mimic block time behavior (as long as signers are different) - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - header.set_signer( signer2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 110 ); - header.set_signer( signer4 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - header.set_signer( signer5 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); - state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_5 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); - - // END: Mimic block time behavior (as long as signers are different) - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Create two forks, then double produce on the newer fork - - /** - * / state_3 (height: 2, time: 101, signer: signer3) <-- Double production - * / - * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102, signer: signer3) <-- Double production - * / - * genesis --- state_2 (height: 1, time: 99) <-- Resulting head - * - * - */ - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - 
db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 99 ); - header.set_signer( signer2 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - - header.set_timestamp( 102 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock)->id() == state_3->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); - - /** - * Fork heads - * - * / state_3 (height: 2, time: 101) - * / - * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102) - * / - * genesis --- state_2 (height: 1, time: 99) - * - * - */ - - auto fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 3 ); - auto it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_2->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_3->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_4->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Create two forks, then double produce on the newer fork - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Create two forks, then double produce on the older fork - - /** - * Resulting head / state_3 (height: 2, time: 101, signer: signer3) <-- Double production - * V / - * / state_1 (height: 1, time: 99) --- state_4 (height: 2, time: 102, signer: signer3) <-- Double production - * / - * genesis --- state_2 (height: 1, time: 100) - * - * - */ - - header.set_timestamp( 99 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - 
header.set_timestamp( 100 ); - header.set_signer( signer2 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 101 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); - state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_3 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - - header.set_timestamp( 102 ); - header.set_signer( signer3 ); - header.set_height( 2 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); - state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_4 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - /** - * Fork heads - * - * / state_1 (height: 1, time: 99) - * / - * genesis --- state_2 (height: 1, time: 100) - * - * - */ - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 2 ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_1->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == state_2->id(); } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Create two forks, then double produce on the older fork - - shared_db_lock.reset(); - state_1.reset(); - state_2.reset(); - state_3.reset(); - state_4.reset(); - state_5.reset(); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_node_ptr ){}, &state_db::pob_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); - - // BEGIN: Edge case when double production is the first block - - /** - * - * - * / state_1 (height: 1, time: 99, signer: signer1) <--- Double production - * / - * genesis --- state_2 (height: 1, time: 100, signer: signer1) <--- Double production - * - * - */ - - header.set_timestamp( 99 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - - header.set_timestamp( 100 ); - header.set_signer( signer1 ); - header.set_height( 1 ); - state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); - state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); - BOOST_REQUIRE( state_2 ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); - db.finalize_node( state_id, shared_db_lock ); - BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); - - /** - * Fork heads - * - * 
genesis - * - */ - - fork_heads = db.get_fork_heads( shared_db_lock ); - BOOST_REQUIRE( fork_heads.size() == 1 ); - it = std::find_if( std::begin( fork_heads ), std::end( fork_heads ), [&]( state_node_ptr p ) { return p->id() == genesis_id; } ); - BOOST_REQUIRE( it != std::end( fork_heads ) ); - fork_heads.clear(); - - // END: Edge case when double production is the first block - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + /** + * The final fork graph looks like the following: + * + * / state_1 (100) --- state_4 (110) + * / \ + * genesis --- state_2 (99) \ state_5 (110) + * \ + * \ state_3 (101) + */ + + BOOST_TEST_MESSAGE( "Test default FIFO fork resolution" ); + + auto shared_db_lock = db.get_shared_lock(); + auto genesis_id = db.get_head( shared_db_lock )->id(); + + protocol::block_header header; + header.set_timestamp( 100 ); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + auto state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 101 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + auto state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 110 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + auto state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + auto state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + BOOST_TEST_MESSAGE( "Test block time fork resolution" ); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::block_time_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + header.set_timestamp( 100 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + 
BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 110 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + BOOST_TEST_MESSAGE( "Test pob fork resolution" ); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + std::string signer1 = "signer1"; + std::string signer2 = "signer2"; + std::string signer3 = "signer3"; + std::string signer4 = "signer4"; + std::string signer5 = "signer5"; + + // BEGIN: Mimic block time behavior (as long as signers are different) + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + header.set_signer( signer2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 110 
); + header.set_signer( signer4 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + header.set_signer( signer5 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 5 ); + state_5 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_5 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_4->id() ); + + // END: Mimic block time behavior (as long as signers are different) + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Create two forks, then double produce on the newer fork + + /** + * / state_3 (height: 2, time: 101, signer: signer3) <-- Double + * production + * / + * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102, signer: signer3) <-- Double + * production + * / + * genesis --- state_2 (height: 1, time: 99) <-- Resulting head + * + * + */ + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 99 ); + header.set_signer( signer2 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + + header.set_timestamp( 102 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_2->id() ); + + /** + * Fork heads + * + * / state_3 (height: 2, time: 101) + * / + * / state_1 (height: 1, time: 100) - state_4 (height: 2, time: 102) + * / + * genesis --- state_2 (height: 1, time: 99) 
+ * + * + */ + + auto fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 3 ); + auto it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_2->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_3->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_4->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Create two forks, then double produce on the newer fork + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Create two forks, then double produce on the older fork + + /** + * Resulting head / state_3 (height: 2, time: 101, signer: signer3) <-- Double + * production V / / state_1 (height: 1, time: 99) --- state_4 (height: 2, time: 102, signer: + * signer3) <-- Double production + * / + * genesis --- state_2 (height: 1, time: 100) + * + * + */ + + header.set_timestamp( 99 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 100 ); + header.set_signer( signer2 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 101 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 3 ); + state_3 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_3 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + + header.set_timestamp( 102 ); + header.set_signer( signer3 ); + header.set_height( 2 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 4 ); + state_4 = db.create_writable_node( state_1->id(), state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_4 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_3->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + /** + * Fork heads + * + * / state_1 (height: 1, time: 99) + * / + * genesis --- state_2 (height: 1, time: 100) + * + * + */ + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 2 ); + it = std::find_if( std::begin( fork_heads ), + 
std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_1->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == state_2->id(); + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Create two forks, then double produce on the older fork + + shared_db_lock.reset(); + state_1.reset(); + state_2.reset(); + state_3.reset(); + state_4.reset(); + state_5.reset(); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); + + // BEGIN: Edge case when double production is the first block + + /** + * + * + * / state_1 (height: 1, time: 99, signer: signer1) <--- Double production + * / + * genesis --- state_2 (height: 1, time: 100, signer: signer1) <--- Double production + * + * + */ + + header.set_timestamp( 99 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + state_1 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + + header.set_timestamp( 100 ); + header.set_signer( signer1 ); + header.set_height( 1 ); + state_id = crypto::hash( crypto::multicodec::sha2_256, 2 ); + state_2 = db.create_writable_node( genesis_id, state_id, header, shared_db_lock ); + BOOST_REQUIRE( state_2 ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == state_1->id() ); + db.finalize_node( state_id, shared_db_lock ); + BOOST_CHECK( db.get_head( shared_db_lock )->id() == genesis_id ); + + /** + * Fork heads + * + * genesis + * + */ + + fork_heads = db.get_fork_heads( shared_db_lock ); + BOOST_REQUIRE( fork_heads.size() == 1 ); + it = std::find_if( std::begin( fork_heads ), + std::end( fork_heads ), + [ & ]( state_node_ptr p ) + { + return p->id() == genesis_id; + } ); + BOOST_REQUIRE( it != std::end( fork_heads ) ); + fork_heads.clear(); + + // END: Edge case when double production is the first block + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( restart_cache ) -{ try { - - auto shared_db_lock = db.get_shared_lock(); - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); +{ + try + { + auto shared_db_lock = db.get_shared_lock(); + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); - state_1->put_object( space, a_key, &a_val ); + state_1->put_object( space, a_key, &a_val ); - { - auto [ptr, key] = state_1->get_next_object( space, std::string() ); + { + auto [ ptr, key ] = 
state_1->get_next_object( space, std::string() ); BOOST_REQUIRE( ptr ); BOOST_CHECK_EQUAL( *ptr, a_val ); BOOST_CHECK_EQUAL( key, a_key ); - } + } - db.finalize_node( state_id, shared_db_lock ); - state_1.reset(); - shared_db_lock.reset(); + db.finalize_node( state_id, shared_db_lock ); + state_1.reset(); + shared_db_lock.reset(); - db.commit_node( state_id, db.get_unique_lock() ); + db.commit_node( state_id, db.get_unique_lock() ); - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - shared_db_lock = db.get_shared_lock(); + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + shared_db_lock = db.get_shared_lock(); - state_1 = db.get_root( shared_db_lock ); - { - auto [ptr, key] = state_1->get_next_object( space, std::string() ); + state_1 = db.get_root( shared_db_lock ); + { + auto [ ptr, key ] = state_1->get_next_object( space, std::string() ); BOOST_REQUIRE( ptr ); BOOST_CHECK_EQUAL( *ptr, a_val ); BOOST_CHECK_EQUAL( key, a_key ); - } - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } + } + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( persistence ) -{ try { - - BOOST_TEST_MESSAGE( "Checking persistence when backed by rocksdb" ); - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - - auto shared_db_lock = db.get_shared_lock(); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - auto key_size = util::converter::as< std::string >( db_key ).size(); - - crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); - auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - - db.finalize_node( state_id, shared_db_lock ); - - auto ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_id, db.get_unique_lock() ); - - db.close( db.get_unique_lock() ); - db.open( temp, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.get_node( state_id, shared_db_lock ); - BOOST_REQUIRE( state_1 ); - - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.close( db.get_unique_lock() ); - - BOOST_TEST_MESSAGE( "Checking transience when backed by std::map" ); - db.open( {}, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), state_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1 ); - BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); - - db.finalize_node( state_id, shared_db_lock ); - ptr = state_1->get_object( space, a_key ); - BOOST_REQUIRE( ptr ); - BOOST_CHECK_EQUAL( *ptr, a_val ); - - state_1.reset(); - shared_db_lock.reset(); - db.commit_node( state_id, db.get_unique_lock() ); - - db.close( db.get_unique_lock() ); - db.open( {}, [&]( state_db::state_node_ptr root ){}, &state_db::fifo_comparator, 
db.get_unique_lock() ); - - shared_db_lock = db.get_shared_lock(); - state_1 = db.get_node( state_id, shared_db_lock ); - BOOST_REQUIRE( !state_1 ); - - ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); - BOOST_REQUIRE( !ptr ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + BOOST_TEST_MESSAGE( "Checking persistence when backed by rocksdb" ); + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + + auto shared_db_lock = db.get_shared_lock(); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + auto key_size = util::converter::as< std::string >( db_key ).size(); + + crypto::multihash state_id = crypto::hash( crypto::multicodec::sha2_256, 1 ); + auto state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + + db.finalize_node( state_id, shared_db_lock ); + + auto ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_id, db.get_unique_lock() ); + + db.close( db.get_unique_lock() ); + db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.get_node( state_id, shared_db_lock ); + BOOST_REQUIRE( state_1 ); + + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.close( db.get_unique_lock() ); + + BOOST_TEST_MESSAGE( "Checking transience when backed by std::map" ); + db.open( {}, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1 ); + BOOST_CHECK_EQUAL( state_1->put_object( space, a_key, &a_val ), a_val.size() + key_size ); + + db.finalize_node( state_id, shared_db_lock ); + ptr = state_1->get_object( space, a_key ); + BOOST_REQUIRE( ptr ); + BOOST_CHECK_EQUAL( *ptr, a_val ); + + state_1.reset(); + shared_db_lock.reset(); + db.commit_node( state_id, db.get_unique_lock() ); + + db.close( db.get_unique_lock() ); + db.open( {}, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + + shared_db_lock = db.get_shared_lock(); + state_1 = db.get_node( state_id, shared_db_lock ); + BOOST_REQUIRE( !state_1 ); + + ptr = db.get_head( shared_db_lock )->get_object( space, a_key ); + BOOST_REQUIRE( !ptr ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( clone_node ) -{ try { - BOOST_TEST_MESSAGE( "Check clone of un-finalized node" ); - - object_space space; - std::string a_key = "a"; - std::string a_val = "alice"; - std::string b_key = "bob"; - std::string b_val = "bob"; - std::string c_key = "charlie"; - std::string c_val = "charlie"; - std::string d_key = "dave"; - std::string d_val = "dave"; - - auto shared_db_lock = db.get_shared_lock(); - - chain::database_key db_key; - *db_key.mutable_space() = space; - db_key.set_key( a_key ); - - crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); - auto state_1a = db.create_writable_node( db.get_head( shared_db_lock 
)->id(), state_1a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1a ); - state_1a->put_object( space, a_key, &a_val ); - state_1a->put_object( space, b_key, &b_val ); - db.finalize_node( state_1a_id, shared_db_lock ); - - crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); - auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2a ); - state_2a->put_object( space, c_key, &c_val ); - state_2a->remove_object( space, a_key ); - - crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); - auto state_2b = db.clone_node( state_2a_id, state_2b_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2b ); - BOOST_CHECK( !state_2b->is_finalized() ); - BOOST_CHECK( !state_2b->get_object( space, a_key ) ); - BOOST_REQUIRE( state_2b->get_object( space, b_key ) ); - BOOST_CHECK_EQUAL( *state_2b->get_object( space, b_key ), b_val ); - BOOST_REQUIRE( state_2b->get_object( space, c_key ) ); - BOOST_CHECK_EQUAL( *state_2b->get_object( space, c_key ), c_val ); - - state_2b->remove_object( space, b_key ); - state_2b->put_object( space, d_key, &d_val ); - - BOOST_REQUIRE( state_2a->get_object( space, b_key ) ); - BOOST_CHECK_EQUAL( *state_2a->get_object( space, b_key ), b_val ); - BOOST_CHECK( !state_2a->get_object( space, d_key ) ); - - BOOST_TEST_MESSAGE( "Checking clone of a finalized node" ); - - crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); - BOOST_REQUIRE_THROW( db.clone_node( state_1a_id, state_1b_id, protocol::block_header(), shared_db_lock ), illegal_argument ); -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + BOOST_TEST_MESSAGE( "Check clone of un-finalized node" ); + + object_space space; + std::string a_key = "a"; + std::string a_val = "alice"; + std::string b_key = "bob"; + std::string b_val = "bob"; + std::string c_key = "charlie"; + std::string c_val = "charlie"; + std::string d_key = "dave"; + std::string d_val = "dave"; + + auto shared_db_lock = db.get_shared_lock(); + + chain::database_key db_key; + *db_key.mutable_space() = space; + db_key.set_key( a_key ); + + crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); + auto state_1a = db.create_writable_node( db.get_head( shared_db_lock )->id(), + state_1a_id, + protocol::block_header(), + shared_db_lock ); + BOOST_REQUIRE( state_1a ); + state_1a->put_object( space, a_key, &a_val ); + state_1a->put_object( space, b_key, &b_val ); + db.finalize_node( state_1a_id, shared_db_lock ); + + crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); + auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2a ); + state_2a->put_object( space, c_key, &c_val ); + state_2a->remove_object( space, a_key ); + + crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); + auto state_2b = db.clone_node( state_2a_id, state_2b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2b ); + BOOST_CHECK( !state_2b->is_finalized() ); + BOOST_CHECK( !state_2b->get_object( space, a_key ) ); + BOOST_REQUIRE( state_2b->get_object( space, b_key ) ); + BOOST_CHECK_EQUAL( *state_2b->get_object( space, b_key ), b_val ); + BOOST_REQUIRE( state_2b->get_object( space, c_key ) ); + BOOST_CHECK_EQUAL( *state_2b->get_object( space, c_key ), c_val ); + + state_2b->remove_object( space, b_key 
); + state_2b->put_object( space, d_key, &d_val ); + + BOOST_REQUIRE( state_2a->get_object( space, b_key ) ); + BOOST_CHECK_EQUAL( *state_2a->get_object( space, b_key ), b_val ); + BOOST_CHECK( !state_2a->get_object( space, d_key ) ); + + BOOST_TEST_MESSAGE( "Checking clone of a finalized node" ); + + crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); + BOOST_REQUIRE_THROW( db.clone_node( state_1a_id, state_1b_id, protocol::block_header(), shared_db_lock ), + illegal_argument ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_CASE( get_all_nodes ) -{ try { - BOOST_TEST_MESSAGE( "Create state nodes" ); - - auto shared_db_lock = db.get_shared_lock(); - auto root_id = db.get_root( shared_db_lock )->id(); - - crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); - auto state_1a = db.create_writable_node( root_id, state_1a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1a ); - db.finalize_node( state_1a_id, shared_db_lock ); - - crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); - auto state_1b = db.create_writable_node( root_id, state_1b_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_1b ); - - crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); - auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2a ); - - crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); - auto state_2b = db.create_writable_node( state_1a_id, state_2b_id, protocol::block_header(), shared_db_lock ); - BOOST_REQUIRE( state_2b ); - - BOOST_TEST_MESSAGE( "Check all state nodes" ); - - auto nodes = db.get_all_nodes( shared_db_lock ); - BOOST_REQUIRE_EQUAL( nodes.size(), 5 ); - BOOST_CHECK( nodes[0]->id() == root_id ); - BOOST_CHECK( nodes[1]->id() == state_1b_id ); - BOOST_CHECK( nodes[2]->id() == state_2a_id ); - BOOST_CHECK( nodes[3]->id() == state_1a_id ); - BOOST_CHECK( nodes[4]->id() == state_2b_id ); - - BOOST_TEST_MESSAGE( "Commit 1a" ); - - nodes.clear(); - state_1a.reset(); - state_1b.reset(); - state_2a.reset(); - state_2b.reset(); - shared_db_lock.reset(); - - auto unique_db_lock = db.get_unique_lock(); - db.commit_node( state_1a_id, unique_db_lock ); - - BOOST_TEST_MESSAGE( "Check all state nodes" ); - - nodes = db.get_all_nodes( unique_db_lock ); - BOOST_REQUIRE_EQUAL( nodes.size(), 3 ); - BOOST_CHECK( nodes[0]->id() == state_2a_id ); - BOOST_CHECK( nodes[1]->id() == state_1a_id ); - BOOST_CHECK( nodes[2]->id() == state_2b_id ); - -} KOINOS_CATCH_LOG_AND_RETHROW(info) } +{ + try + { + BOOST_TEST_MESSAGE( "Create state nodes" ); + + auto shared_db_lock = db.get_shared_lock(); + auto root_id = db.get_root( shared_db_lock )->id(); + + crypto::multihash state_1a_id = crypto::hash( crypto::multicodec::sha2_256, 0x1a ); + auto state_1a = db.create_writable_node( root_id, state_1a_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_1a ); + db.finalize_node( state_1a_id, shared_db_lock ); + + crypto::multihash state_1b_id = crypto::hash( crypto::multicodec::sha2_256, 0x1b ); + auto state_1b = db.create_writable_node( root_id, state_1b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_1b ); + + crypto::multihash state_2a_id = crypto::hash( crypto::multicodec::sha2_256, 0x2a ); + auto state_2a = db.create_writable_node( state_1a_id, state_2a_id, protocol::block_header(), shared_db_lock 
); + BOOST_REQUIRE( state_2a ); + + crypto::multihash state_2b_id = crypto::hash( crypto::multicodec::sha2_256, 0x2b ); + auto state_2b = db.create_writable_node( state_1a_id, state_2b_id, protocol::block_header(), shared_db_lock ); + BOOST_REQUIRE( state_2b ); + + BOOST_TEST_MESSAGE( "Check all state nodes" ); + + auto nodes = db.get_all_nodes( shared_db_lock ); + BOOST_REQUIRE_EQUAL( nodes.size(), 5 ); + BOOST_CHECK( nodes[ 0 ]->id() == root_id ); + BOOST_CHECK( nodes[ 1 ]->id() == state_1b_id ); + BOOST_CHECK( nodes[ 2 ]->id() == state_2a_id ); + BOOST_CHECK( nodes[ 3 ]->id() == state_1a_id ); + BOOST_CHECK( nodes[ 4 ]->id() == state_2b_id ); + + BOOST_TEST_MESSAGE( "Commit 1a" ); + + nodes.clear(); + state_1a.reset(); + state_1b.reset(); + state_2a.reset(); + state_2b.reset(); + shared_db_lock.reset(); + + auto unique_db_lock = db.get_unique_lock(); + db.commit_node( state_1a_id, unique_db_lock ); + + BOOST_TEST_MESSAGE( "Check all state nodes" ); + + nodes = db.get_all_nodes( unique_db_lock ); + BOOST_REQUIRE_EQUAL( nodes.size(), 3 ); + BOOST_CHECK( nodes[ 0 ]->id() == state_2a_id ); + BOOST_CHECK( nodes[ 1 ]->id() == state_1a_id ); + BOOST_CHECK( nodes[ 2 ]->id() == state_2b_id ); + } + KOINOS_CATCH_LOG_AND_RETHROW( info ) +} BOOST_AUTO_TEST_SUITE_END() From 7d2d86e939f12f982cf6075a5cfaa0eea9efd6c3 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 11 Dec 2023 17:35:36 -0800 Subject: [PATCH 08/26] static-coverage -> static-analysis --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8c7339e..186d282 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,7 +19,7 @@ jobs: - os: linux dist: jammy env: - - RUN_TIME=static-coverage + - RUN_TIME=static-analysis - MATRIX_EVAL="CC=clang && CXX=clang++" - os: linux dist: jammy From b5cc7a5d59c45221c6594ddfea5e835730d3b8c9 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Thu, 14 Dec 2023 10:11:58 -0500 Subject: [PATCH 09/26] Cleanup State DB CMake issues --- .travis.yml | 6 ++++-- CMakeLists.txt | 7 +++---- src/CMakeLists.txt | 8 ++------ tests/CMakeLists.txt | 35 +++++++++++++++++++++-------------- tools/ci/test.sh | 9 +++++---- 5 files changed, 35 insertions(+), 30 deletions(-) diff --git a/.travis.yml b/.travis.yml index 186d282..040abae 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,6 @@ language: c++ cache: ccache: true - pip: true addons: apt: @@ -12,6 +11,9 @@ addons: - llvm - lcov - ruby + - gcc-12 + - g++-12 + - valgrind update: true jobs: @@ -30,7 +32,7 @@ jobs: dist: jammy env: - RUN_TYPE=test - - MATRIX_EVAL="CC=gcc && CXX=g++" + - MATRIX_EVAL="CC=gcc-12 && CXX=g++-12" - os: linux dist: jammy env: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8d6005b..d7cf515 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,8 +16,7 @@ include(FetchContent) FetchContent_Declare( koinos_cmake GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git - GIT_TAG 4967f0548e3f4f555ac95494413f30ac9d0ced4d -) + GIT_TAG 65e4239b4634887778c6c662d1b7cb8f0ed0cacc) FetchContent_MakeAvailable(koinos_cmake) include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake") @@ -30,8 +29,8 @@ project(koinos_state_db koinos_define_version() koinos_add_package(Boost CONFIG REQUIRED - ADD_COMPONENTS log exception test - FIND_COMPONENTS log log_setup + ADD_COMPONENTS log exception program_options test + FIND_COMPONENTS log log_setup exception program_options ) koinos_add_package(rocksdb NAME RocksDB CONFIG REQUIRED) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1d43c8c..f994468 100644 
--- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -42,10 +42,7 @@ target_include_directories( $ $ PRIVATE - ${PROJECT_SOURCE_DIR}/src -) - -add_library(Koinos::state_db ALIAS state_db) + $) koinos_install(TARGETS state_db) @@ -53,5 +50,4 @@ install( DIRECTORY ${PROJECT_SOURCE_DIR}/include DESTINATION - ${CMAKE_INSTALL_PREFIX} -) + ${CMAKE_INSTALL_PREFIX}) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9c388ac..76e0d13 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -4,20 +4,28 @@ endif() include(CTest) -enable_testing() +koinos_add_test( + state_db_tests + SOURCES + main.cpp + state_db_test.cpp) -koinos_add_test(state_db_tests - SOURCES - main.cpp - state_db_test.cpp -) +target_link_libraries( + state_db_tests + PRIVATE + state_db + Koinos::proto + Koinos::crypto + Koinos::log + Koinos::util + Koinos::exception) -target_link_libraries(state_db_tests Koinos::proto Koinos::crypto Koinos::state_db Koinos::log Koinos::util Koinos::exception ${PLATFORM_SPECIFIC_LIBS}) -target_include_directories(state_db_tests PUBLIC - ${PROJECT_SOURCE_DIR}/src # Private headers - $ - $ # /include -) +target_include_directories( + state_db_tests + PUBLIC + $ + $ + $ Date: Thu, 14 Dec 2023 10:24:36 -0500 Subject: [PATCH 10/26] Fix typo in CI test runner --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 040abae..d74f235 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ jobs: - os: linux dist: jammy env: - - RUN_TIME=static-analysis + - RUN_TYPE=static-analysis - MATRIX_EVAL="CC=clang && CXX=clang++" - os: linux dist: jammy From 7fce0842b8a7d9aafc82d596958dcdbff272d42d Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Thu, 14 Dec 2023 10:46:05 -0500 Subject: [PATCH 11/26] Fix test binary name, normalize build script --- tools/ci/build.sh | 1 + tools/ci/test.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/ci/build.sh b/tools/ci/build.sh index b3aef01..d0bbba0 100755 --- a/tools/ci/build.sh +++ b/tools/ci/build.sh @@ -12,6 +12,7 @@ if [ "$RUN_TYPE" = "test" ]; then elif [ "$RUN_TYPE" = "coverage" ]; then cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. cmake --build . --config Debug --parallel 3 --target coverage + lcov --version elif [ "$RUN_TYPE" = "static-analysis" ]; then cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON .. cmake --build . 
--config Debug --parallel 3 diff --git a/tools/ci/test.sh b/tools/ci/test.sh index a75b2eb..502a766 100755 --- a/tools/ci/test.sh +++ b/tools/ci/test.sh @@ -11,5 +11,5 @@ if [ "$RUN_TYPE" = "test" ]; then exec ctest -j3 --output-on-failure elif [ "$RUN_TYPE" = "coverage" ]; then cd $TRAVIS_BUILD_DIR/build/tests - exec valgrind --error-exitcode=1 --leak-check=yes ./koinos_state_db_tests + exec valgrind --error-exitcode=1 --leak-check=yes ./state_db_tests fi From 0ee172df0a6a4b99a093dbcd38c5af3eb301d4c5 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Thu, 14 Dec 2023 10:46:44 -0500 Subject: [PATCH 12/26] Format tests --- tests/state_db_test.cpp | 72 ++++++++++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 12 deletions(-) diff --git a/tests/state_db_test.cpp b/tests/state_db_test.cpp index cd2f53a..1bf61a4 100644 --- a/tests/state_db_test.cpp +++ b/tests/state_db_test.cpp @@ -50,7 +50,11 @@ struct state_db_fixture temp = std::filesystem::temp_directory_path() / util::random_alphanumeric( 8 ); std::filesystem::create_directory( temp ); - db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, fork_resolution_algorithm::fifo, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + fork_resolution_algorithm::fifo, + db.get_unique_lock() ); } ~state_db_fixture() @@ -618,7 +622,11 @@ BOOST_AUTO_TEST_CASE( reset_test ) BOOST_CHECK_THROW( db.commit_node( crypto::hash( crypto::multicodec::sha2_256, 1 ), db.get_unique_lock() ), koinos::exception ); - db.open( temp, []( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + temp, + []( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); @@ -650,7 +658,11 @@ BOOST_AUTO_TEST_CASE( reset_test ) shared_db_lock.reset(); state_1.reset(); db.close( db.get_unique_lock() ); - db.open( temp, []( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + temp, + []( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); // State node was committed and should exist on open shared_db_lock = db.get_shared_lock(); @@ -1239,7 +1251,11 @@ BOOST_AUTO_TEST_CASE( fork_resolution ) BOOST_TEST_MESSAGE( "Test block time fork resolution" ); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::block_time_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::block_time_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); header.set_timestamp( 100 ); @@ -1291,7 +1307,11 @@ BOOST_AUTO_TEST_CASE( fork_resolution ) BOOST_TEST_MESSAGE( "Test pob fork resolution" ); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); std::string signer1 = "signer1"; @@ -1356,7 +1376,11 @@ BOOST_AUTO_TEST_CASE( fork_resolution ) state_5.reset(); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); // BEGIN: Create two forks, then double produce on the newer fork @@ -1460,7 +1484,11 @@ BOOST_AUTO_TEST_CASE( 
fork_resolution ) state_5.reset(); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); // BEGIN: Create two forks, then double produce on the older fork @@ -1553,7 +1581,11 @@ BOOST_AUTO_TEST_CASE( fork_resolution ) state_5.reset(); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_node_ptr ) {}, &state_db::pob_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_node_ptr ) {}, + &state_db::pob_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); // BEGIN: Edge case when double production is the first block @@ -1648,7 +1680,11 @@ BOOST_AUTO_TEST_CASE( restart_cache ) db.commit_node( state_id, db.get_unique_lock() ); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); state_1 = db.get_root( shared_db_lock ); @@ -1698,7 +1734,11 @@ BOOST_AUTO_TEST_CASE( persistence ) db.commit_node( state_id, db.get_unique_lock() ); db.close( db.get_unique_lock() ); - db.open( temp, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + temp, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); state_1 = db.get_node( state_id, shared_db_lock ); @@ -1713,7 +1753,11 @@ BOOST_AUTO_TEST_CASE( persistence ) db.close( db.get_unique_lock() ); BOOST_TEST_MESSAGE( "Checking transience when backed by std::map" ); - db.open( {}, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + {}, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); state_1 = db.create_writable_node( db.get_head( shared_db_lock )->id(), @@ -1733,7 +1777,11 @@ BOOST_AUTO_TEST_CASE( persistence ) db.commit_node( state_id, db.get_unique_lock() ); db.close( db.get_unique_lock() ); - db.open( {}, [ & ]( state_db::state_node_ptr root ) {}, &state_db::fifo_comparator, db.get_unique_lock() ); + db.open( + {}, + [ & ]( state_db::state_node_ptr root ) {}, + &state_db::fifo_comparator, + db.get_unique_lock() ); shared_db_lock = db.get_shared_lock(); state_1 = db.get_node( state_id, shared_db_lock ); From 2fbc3e806a305feb5f5afecd09b56c95cb9904a6 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Thu, 14 Dec 2023 11:53:45 -0500 Subject: [PATCH 13/26] Remove valgrind from build --- tools/ci/test.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/ci/test.sh b/tools/ci/test.sh index 502a766..1467022 100755 --- a/tools/ci/test.sh +++ b/tools/ci/test.sh @@ -9,7 +9,4 @@ if [ "$RUN_TYPE" = "test" ]; then cd $TRAVIS_BUILD_DIR/build/tests exec ctest -j3 --output-on-failure -elif [ "$RUN_TYPE" = "coverage" ]; then - cd $TRAVIS_BUILD_DIR/build/tests - exec valgrind --error-exitcode=1 --leak-check=yes ./state_db_tests fi From ceaa402341ff7b19f81e6871fb6822ea30f85a0e Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 26 Feb 2024 16:16:47 -0800 Subject: [PATCH 14/26] Add sanitize build to CI --- .travis.yml | 25 ++++++++++++++++--------- CMakeLists.txt | 2 +- 
README.md | 9 +++++++++ tools/ci/build.sh | 18 ++++++++++++++++++ tools/ci/test.sh | 11 ++++++++++- 5 files changed, 54 insertions(+), 11 deletions(-) diff --git a/.travis.yml b/.travis.yml index d74f235..a16a23a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,31 +9,38 @@ addons: - clang - clang-format - llvm + - llvm-dev - lcov - ruby - - gcc-12 - - g++-12 - - valgrind - update: true jobs: include: - - os: linux + - name: "Static Analysis" + os: linux dist: jammy env: - RUN_TYPE=static-analysis - MATRIX_EVAL="CC=clang && CXX=clang++" - - os: linux + - name: "Sanitizer" + os: linux + dist: jammy + env: + - RUN_TYPE=sanitizer + - MATRIX_EVAL="CC=clang && CXX=clang++" + - name: "Coverage" + os: linux dist: jammy env: - RUN_TYPE=coverage - MATRIX_EVAL="CC=clang && CXX=clang++" - - os: linux + - name: "GCC Unit Tests" + os: linux dist: jammy env: - RUN_TYPE=test - - MATRIX_EVAL="CC=gcc-12 && CXX=g++-12" - - os: linux + - MATRIX_EVAL="CC=gcc && CXX=g++" + - name: "Clang Unit Tests" + os: linux dist: jammy env: - RUN_TYPE=test diff --git a/CMakeLists.txt b/CMakeLists.txt index d7cf515..def7dac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ include(FetchContent) FetchContent_Declare( koinos_cmake GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git - GIT_TAG 65e4239b4634887778c6c662d1b7cb8f0ed0cacc) + GIT_TAG 897f11188f1761ddd810d77839417549e68465aa) FetchContent_MakeAvailable(koinos_cmake) include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake") diff --git a/README.md b/README.md index ee311a9..e756120 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,15 @@ cmake -D CMAKE_BUILD_TYPE=Debug -D COVERAGE=ON .. cmake --build . --config Debug --parallel 3 --target coverage ``` +You can run tests in different sanitizer profiles. Those profiles are None (Default), Address, Stack, and Thread. Currently, these are only known to work with clang, but may work with gcc with additional environment configuration. + +``` +cmake -D CMAKE_BUILT_TYPE=Debug -D SANITIZER=Address .. +cmake --build . --config Debug --parallel --target util_tests +cd tests +ctest -j +``` + ### Formatting Formatting of the source code is enforced by ClangFormat. If ClangFormat is installed, build targets will be automatically generated. You can review the library's code style by uploading the included `.clang-format` to https://clang-format-configurator.site/. diff --git a/tools/ci/build.sh b/tools/ci/build.sh index d0bbba0..c6931e8 100755 --- a/tools/ci/build.sh +++ b/tools/ci/build.sh @@ -16,4 +16,22 @@ elif [ "$RUN_TYPE" = "coverage" ]; then elif [ "$RUN_TYPE" = "static-analysis" ]; then cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON .. cmake --build . --config Debug --parallel 3 +elif [ "$RUN_TYPE" = "sanitizer" ]; then + popd + mkdir build-address + pushd build-address + cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Address .. + cmake --build . --config Debug --parallel 3 + + popd + mkdir build-stack + pushd build-stack + cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Stack .. + cmake --build . --config Debug --parallel 3 + + popd + mkdir build-thread + pushd build-thread + cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Thread .. + cmake --build . --config Debug --parallel 3 fi diff --git a/tools/ci/test.sh b/tools/ci/test.sh index 1467022..b0d4b40 100755 --- a/tools/ci/test.sh +++ b/tools/ci/test.sh @@ -8,5 +8,14 @@ if [ "$RUN_TYPE" = "test" ]; then cmake --build . 
--config Release --parallel 3 --target format.check cd $TRAVIS_BUILD_DIR/build/tests - exec ctest -j3 --output-on-failure + ctest -j3 --output-on-failure +elif [ "$RUN_TYPE" = "sanitizer" ]; then + cd $TRAVIS_BUILD_DIR/build-address/tests + ctest -j1 --output-on-failure + + cd $TRAVIS_BUILD_DIR/build-stack/tests + ctest -j1 --output-on-failure + + cd $TRAVIS_BUILD_DIR/build-thread/tests + ctest -j1 --output-on-failure fi From 08eb1f9737d8737624e73a09bfa69e4f68f77289 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 26 Feb 2024 16:41:45 -0800 Subject: [PATCH 15/26] Add gcc and g++ 12 back --- .travis.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a16a23a..30ae550 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,6 +5,7 @@ cache: addons: apt: + update: true packages: - clang - clang-format @@ -12,6 +13,8 @@ addons: - llvm-dev - lcov - ruby + - gcc-12 + - g++-12 jobs: include: @@ -38,7 +41,7 @@ jobs: dist: jammy env: - RUN_TYPE=test - - MATRIX_EVAL="CC=gcc && CXX=g++" + - MATRIX_EVAL="CC=gcc-12 && CXX=g++-12" - name: "Clang Unit Tests" os: linux dist: jammy From 88695df286c5e43e803a57bd03efafe431942a66 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 26 Feb 2024 16:43:44 -0800 Subject: [PATCH 16/26] Fix .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 30ae550..4e458ae 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,7 +18,7 @@ addons: jobs: include: - - name: "Static Analysis" + - name: "Static Analysis" os: linux dist: jammy env: From d6eeec64a9cc14b289fa0fd58980ff1160745b2c Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 26 Feb 2024 20:27:58 -0800 Subject: [PATCH 17/26] pushd in build.sh --- tools/ci/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/build.sh b/tools/ci/build.sh index c6931e8..378853a 100755 --- a/tools/ci/build.sh +++ b/tools/ci/build.sh @@ -4,7 +4,7 @@ set -e set -x mkdir build -cd build +pushd build if [ "$RUN_TYPE" = "test" ]; then cmake -DCMAKE_BUILD_TYPE=Release .. 
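
(The `cd build` → `pushd build` change in the patch above appears to be what lets the sanitizer branch introduced in PATCH 14 work: that branch begins with `popd` before creating its sibling `build-address`, `build-stack`, and `build-thread` directories, and `popd` only succeeds if the earlier directory change actually pushed an entry onto the directory stack — plain `cd` does not, so under `set -e` the script would abort there. Below is a minimal sketch of the intended flow, reconstructed from the `tools/ci/build.sh` hunks shown in these patches; it is an illustration of the pushd/popd pairing, not the verbatim CI script.)

```sh
#!/bin/bash
# Sketch of the directory handling in tools/ci/build.sh (assumed flow, for illustration only).
set -e
set -x

mkdir build
pushd build                      # pushes the repo root onto the directory stack

if [ "$RUN_TYPE" = "test" ]; then
  cmake -DCMAKE_BUILD_TYPE=Release ..
  cmake --build . --config Release --parallel 3
elif [ "$RUN_TYPE" = "sanitizer" ]; then
  popd                           # would fail here if `cd build` had been used above (empty stack)
  mkdir build-address
  pushd build-address
  cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Address ..
  cmake --build . --config Debug --parallel 3
  popd                           # back to the repo root; repeat for SANITIZER=Stack and SANITIZER=Thread
fi
```
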
From 553180801f4f7f4ba9a2072c7a00228a1da3a83a Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Thu, 29 Feb 2024 17:40:55 -0700 Subject: [PATCH 18/26] Update Travis slack secret --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d74f235..0ea27e3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -53,5 +53,5 @@ after_success: notifications: slack: - secure: Fx5AGddmmeZqZBUbRdI2/zLaiWy7H+afXEWLYSEnu8+bf6sHCOTPvDsHbgPRZikAdSCa0GaxDzqVeFcOFYgX0+XPQgpY4Q2mBwrNViajxArvJXghA/4DfAAUx7knr22RbZjUsEnp2wEGU6Rp9UHo3Y+IOwXR/2v6BnQ1ntMzS1Np1X4HegHXxArrfmbx6JU+43CXajNJGcx9Bhdwnp6puCRQ+gGUz4+tSwy/0UTX4APTGKVqxuhOqRlpVkbCh1g4l6wDJ5gBP0AXM3d2BTmQYdgL6/BwqJG4REjSp6GSaoEOa22cnPtbmXnWY7/npb6uVRVo/WFvJJt6SPGOb7QJn03j/bEK1HCCIdJZR7MwxpAt7vAkihBkkb65izrKXAF05Y5dgZvJsRUCQNr3aDarPPJzU0hLV2g1o1QinuJayUtbXv3/YiP1aqM/zKe4v+J17El6X+2z+Pwvs53brOMpTIfPL/+vCNHreTKHYD90SfU44R/ObwhqS0F+pmbLwarcnwoA0ADBuorGg2HMBEU+GrTxTQiNj0p2SO+2FTFsp4AJ4LhEDsjlrvAQ2TFH3oDIpoR+IN1Pcoi4oJQ7WvFonNVj0Ndw5uJDzPMqMPy3DKNNGtUWaGy+kTHN12kWaid7HTqUDS6hye9Lq4msGwQXdmRyNjB6BENdX2Pt8L0LtEU= + secure: dO+bR69Uu8PBZcFn2Et0ES66sZfxbOytMJBPLagMnp4yfjmeowKgjvzYaGu/LcW8PEHL0EWN+ZJqa3DIq/y6LMQP8E3WcyKZa8U6i48j8+RHgfbfd2PyXuGO6LmLdGVkndxX0MA3oQaKjk1iPwvjdB883Pm6hEFg97+ia0rD+RrfeiCJQFvDGn8QyLHMQQEL1JuTedZffTtBIgep7LPC7CEzaejx8ic/l6/FrqjCEIpTw82pPm2r43wNi+d4X5ieXzk09sy9lzO3MnBXrFVZmFu4hpg1Z1m+EyWUCtYuRGtN78Cc5DBVQq8l50PQge2OKLP5OBCOP97VZ78Z4MgkubpqDItABYEeO+O+jViGj/yXQq2PrOMscrSzvHsAOEtCpJkPftKj7cnCyjLBfAJ3S4zB4id3uRoV+vgmMRJAqDXEuYWssg5V+Nwy06W/3ObWp06p5eR1t8naGksy9X+/pYMz9CTdgL+LoGOhYXCA5yw1ajh2cS63S6GxRcP8GSQT0w2VzK5EFL2KmKWfqgxZ1OPFuO8R6nnf/iwwxI90r1FSzP4x+0MSBh5pN4Ic75/CFQVaYF+rgfmb85aFyxB209GQTrDwW9qKTvAta948xhEE31Mht6lSey8zhFNLNGgHKDflxciMu4DA1G6IuI5YBZgYFJgZbZ5IUrOndtMAZ+M= on_success: never From 756a9b3564c72f18bf589af8876bdb483218ca8a Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Fri, 1 Mar 2024 18:12:44 +0100 Subject: [PATCH 19/26] Add new directory structure to README and fix typos --- README.md | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index e756120..94196af 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,12 @@ This library implements StateDB, a fork aware persistent database, for the Koino This project's structure follows the [Pitchfork](https://api.csswg.org/bikeshed/?force=1&url=https://raw.githubusercontent.com/vector-of-bool/pitchfork/develop/data/spec.bs) specification. -**`build`**: An ephemeral directory for building the project. Not checked in, but excluded via `.gitignore`. - -**`include`**: Contains all public headers for the Koinos StateDB. - -**`src`**: Contains all source code and private headers for Koinos StateDB. - -**`tests`**: Contains tests for Koinos StateDB. - -**`tools`**: Contains additional tooling for Koinos StateDB, primarily CI scripts. +``` +├── build/ # An ephemeral directory for building the project. Not checked in, but excluded via .gitignore. +├── include/ # Contains all public headers for the Koinos StateDB. +├── src/ # Contains all source code and private headers for Koinos StateDB. +└── tests/ # Contains tests for Koinos StateDB. +``` ### Building @@ -35,17 +32,17 @@ cmake -D CMAKE_BUILD_TYPE=Debug -D STATIC_ANALYSIS=ON .. ### Testing -Tests are built by default as target `koinos_state_db_tests`. You can building them specifically with: +Tests are built by default as target `state_db_tests`. You can building them specifically with: ``` -cmake --build . 
--config Release --parallel --target koinos_state_db_tests +cmake --build . --config Release --parallel --target state_db_tests ``` Tests can be invoked from the tests directiory within the build directory. ``` cd tests -./koinos_state_db_tests +./state_db_tests ``` Tests can also be ran in parallel using CTest. @@ -65,8 +62,8 @@ cmake --build . --config Debug --parallel 3 --target coverage You can run tests in different sanitizer profiles. Those profiles are None (Default), Address, Stack, and Thread. Currently, these are only known to work with clang, but may work with gcc with additional environment configuration. ``` -cmake -D CMAKE_BUILT_TYPE=Debug -D SANITIZER=Address .. -cmake --build . --config Debug --parallel --target util_tests +cmake -D CMAKE_BUILD_TYPE=Debug -D SANITIZER=Address .. +cmake --build . --config Debug --parallel --target state_db_tests cd tests ctest -j ``` From aa06b98a42cccc01c063db1a5393fd6d0f5aee4b Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Fri, 1 Mar 2024 18:25:43 +0100 Subject: [PATCH 20/26] Update the Travis CI YAML --- .travis.yml | 95 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 23 deletions(-) diff --git a/.travis.yml b/.travis.yml index 96df295..a7e91fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,50 +16,99 @@ addons: - gcc-12 - g++-12 +env: + global: + - CMAKE_C_COMPILER_LAUNCHER=ccache + - CMAKE_CXX_COMPILER_LAUNCHER=ccache + jobs: include: - name: "Static Analysis" os: linux dist: jammy env: - - RUN_TYPE=static-analysis - - MATRIX_EVAL="CC=clang && CXX=clang++" + - CC=clang + - CXX=clang++ + before_script: + - cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON . + script: + - cmake --build . --config Debug --parallel 3 + - name: "Sanitizer" os: linux dist: jammy env: - - RUN_TYPE=sanitizer - - MATRIX_EVAL="CC=clang && CXX=clang++" + - CC=clang + - CXX=clang++ + before_script: + - mkdir build-address + - pushd build-address + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Address .. + - cmake --build . --config Debug --parallel 3 + - popd + - mkdir build-stack + - pushd build-stack + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Stack .. + - cmake --build . --config Debug --parallel 3 + - popd + - mkdir build-thread + - pushd build-thread + - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Thread .. + - cmake --build . --config Debug --parallel 3 + - popd + script: + - pushd build-address/tests + - ctest -j1 --output-on-failure + - popd + - pushd build-stack/tests + - ctest -j1 --output-on-failure + - popd + - pushd build-thread/tests + - ctest -j1 --output-on-failure + - name: "Coverage" os: linux dist: jammy env: - - RUN_TYPE=coverage - - MATRIX_EVAL="CC=clang && CXX=clang++" + - CC=clang + - CXX=clang++ + install: + - sudo gem install coveralls-lcov + before_script: + - mkdir build + - cd build + - cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. + script: + - cmake --build . --config Debug --parallel 3 --target coverage + after_success: + - coveralls-lcov --repo-token $COVERALLS_REPO_TOKEN --service-name travis-pro --service-job-id $TRAVIS_JOB_ID ./coverage.info + - name: "GCC Unit Tests" os: linux dist: jammy env: - - RUN_TYPE=test - - MATRIX_EVAL="CC=gcc-12 && CXX=g++-12" - - name: "Clang Unit Tests" + - CC=gcc-12 + - CXX=g++-12 + before_script: + - cmake -DCMAKE_BUILD_TYPE=Release . + - cmake --build . 
--config Release --parallel 3 + script: + - cd tests + - ctest -j3 --output-on-failure + + - name: "Clang Unit Tests and Formatting" os: linux dist: jammy env: - - RUN_TYPE=test - - MATRIX_EVAL="CC=clang && CXX=clang++" - -before_install: - - eval "${MATRIX_EVAL}" - -install: - - tools/ci/install.sh - -script: - - tools/ci/build.sh && tools/ci/test.sh - -after_success: - - tools/ci/after_success.sh + - CC=clang + - CXX=clang++ + before_script: + - cmake -DCMAKE_BUILD_TYPE=Release . + - cmake --build . --config Release --parallel 3 + script: + - cmake --build . --config Release --parallel 3 --target format.check + - cd tests + - ctest -j3 --output-on-failure notifications: slack: From c08f220d22f573a559c359c1dc8c78f306ef5141 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Fri, 1 Mar 2024 18:26:07 +0100 Subject: [PATCH 21/26] Remove the tools directory --- tools/ci/after_success.sh | 6 ------ tools/ci/build.sh | 37 ------------------------------------- tools/ci/ccache_clang | 3 --- tools/ci/ccache_clang++ | 3 --- tools/ci/install.sh | 5 ----- tools/ci/test.sh | 21 --------------------- 6 files changed, 75 deletions(-) delete mode 100755 tools/ci/after_success.sh delete mode 100755 tools/ci/build.sh delete mode 100755 tools/ci/ccache_clang delete mode 100755 tools/ci/ccache_clang++ delete mode 100755 tools/ci/install.sh delete mode 100755 tools/ci/test.sh diff --git a/tools/ci/after_success.sh b/tools/ci/after_success.sh deleted file mode 100755 index c03c7ab..0000000 --- a/tools/ci/after_success.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -if [ "$RUN_TYPE" = "coverage" ]; then - coveralls-lcov --repo-token "$COVERALLS_REPO_TOKEN" --service-name travis-pro --service-job-id "$TRAVIS_JOB_ID" ./build/coverage.info -fi - diff --git a/tools/ci/build.sh b/tools/ci/build.sh deleted file mode 100755 index 378853a..0000000 --- a/tools/ci/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -set -e -set -x - -mkdir build -pushd build - -if [ "$RUN_TYPE" = "test" ]; then - cmake -DCMAKE_BUILD_TYPE=Release .. - cmake --build . --config Release --parallel 3 -elif [ "$RUN_TYPE" = "coverage" ]; then - cmake -DCMAKE_BUILD_TYPE=Debug -DCOVERAGE=ON .. - cmake --build . --config Debug --parallel 3 --target coverage - lcov --version -elif [ "$RUN_TYPE" = "static-analysis" ]; then - cmake -DCMAKE_BUILD_TYPE=Debug -DSTATIC_ANALYSIS=ON .. - cmake --build . --config Debug --parallel 3 -elif [ "$RUN_TYPE" = "sanitizer" ]; then - popd - mkdir build-address - pushd build-address - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Address .. - cmake --build . --config Debug --parallel 3 - - popd - mkdir build-stack - pushd build-stack - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Stack .. - cmake --build . --config Debug --parallel 3 - - popd - mkdir build-thread - pushd build-thread - cmake -DCMAKE_BUILD_TYPE=Debug -DSANITIZER=Thread .. - cmake --build . 
--config Debug --parallel 3 -fi diff --git a/tools/ci/ccache_clang b/tools/ci/ccache_clang deleted file mode 100755 index 7aef1a2..0000000 --- a/tools/ci/ccache_clang +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec ccache clang "$@" diff --git a/tools/ci/ccache_clang++ b/tools/ci/ccache_clang++ deleted file mode 100755 index 49b6341..0000000 --- a/tools/ci/ccache_clang++ +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec ccache clang++ "$@" diff --git a/tools/ci/install.sh b/tools/ci/install.sh deleted file mode 100755 index 4371d4e..0000000 --- a/tools/ci/install.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -if [ "$RUN_TYPE" = "coverage" ]; then - sudo gem install coveralls-lcov -fi diff --git a/tools/ci/test.sh b/tools/ci/test.sh deleted file mode 100755 index b0d4b40..0000000 --- a/tools/ci/test.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -e -set -x - -if [ "$RUN_TYPE" = "test" ]; then - cd $TRAVIS_BUILD_DIR/build - cmake --build . --config Release --parallel 3 --target format.check - - cd $TRAVIS_BUILD_DIR/build/tests - ctest -j3 --output-on-failure -elif [ "$RUN_TYPE" = "sanitizer" ]; then - cd $TRAVIS_BUILD_DIR/build-address/tests - ctest -j1 --output-on-failure - - cd $TRAVIS_BUILD_DIR/build-stack/tests - ctest -j1 --output-on-failure - - cd $TRAVIS_BUILD_DIR/build-thread/tests - ctest -j1 --output-on-failure -fi From c3054e9264927e591c03002569431efec8f8d102 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Fri, 1 Mar 2024 18:33:06 +0100 Subject: [PATCH 22/26] Use recommended cmake_policy() invocation --- CMakeLists.txt | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index def7dac..1be222e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,16 +1,7 @@ cmake_minimum_required(VERSION 3.19.0) -cmake_policy(SET CMP0074 NEW) -cmake_policy(SET CMP0114 NEW) - -if(${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.24.0) - cmake_policy(SET CMP0135 NEW) -endif() - -if(${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.27.0) - cmake_policy(SET CMP0144 NEW) -endif() +cmake_policy(VERSION 3.19.0...3.27.4) include(FetchContent) FetchContent_Declare( From 64dedd925c9a911e7694e81686a8315cdd684068 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Mon, 4 Mar 2024 16:25:47 +0100 Subject: [PATCH 23/26] Update BOOST_TEST_MODULE --- tests/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/main.cpp b/tests/main.cpp index 614368d..78f488d 100644 --- a/tests/main.cpp +++ b/tests/main.cpp @@ -1,3 +1,3 @@ -#define BOOST_TEST_MODULE koinos_state_db_tests +#define BOOST_TEST_MODULE state_db_tests #include #include From 7c16e083afbab8ead512eae8e937c9ee53326d74 Mon Sep 17 00:00:00 2001 From: Steve Gerbino Date: Mon, 4 Mar 2024 19:26:58 +0100 Subject: [PATCH 24/26] Add CONFIG REQUIRED to packages --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1be222e..2d51628 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ koinos_add_package(ethash CONFIG REQUIRED) koinos_add_package(libsecp256k1-vrf CONFIG REQUIRED) koinos_add_package(nlohmann_json CONFIG REQUIRED) koinos_add_package(OpenSSL REQUIRED) -koinos_add_package(yaml-cpp) +koinos_add_package(yaml-cpp CONFIG REQUIRED) koinos_add_package(gRPC CONFIG REQUIRED) koinos_add_package(koinos_proto CONFIG REQUIRED) From b75092767e60d4d4b9a035a171767efec1eb8b0a Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 11 Mar 2024 08:16:09 -0700 Subject: [PATCH 25/26] Remove workflows --- 
.github/workflows/add_issue_to_project.yml | 19 ---------------- .github/workflows/add_pr_to_project.yml | 17 --------------- .github/workflows/add_to_project.yml | 13 ----------- .github/workflows/close_issues.yml | 25 ---------------------- 4 files changed, 74 deletions(-) delete mode 100644 .github/workflows/add_issue_to_project.yml delete mode 100644 .github/workflows/add_pr_to_project.yml delete mode 100644 .github/workflows/add_to_project.yml delete mode 100644 .github/workflows/close_issues.yml diff --git a/.github/workflows/add_issue_to_project.yml b/.github/workflows/add_issue_to_project.yml deleted file mode 100644 index e8f4b83..0000000 --- a/.github/workflows/add_issue_to_project.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Assign issue to project - -on: - issues: - types: [opened] - -jobs: - assign_to_project: - runs-on: ubuntu-latest - name: Assign issue to project - steps: - - - name: Assign issue with `bug` or `enhancement` label to project - uses: actions/add-to-project@v0.4.0 - with: - project-url: https://github.com/orgs/koinos/projects/6 - github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }} - labeled: bug, enhancement - label-operator: OR diff --git a/.github/workflows/add_pr_to_project.yml b/.github/workflows/add_pr_to_project.yml deleted file mode 100644 index 31c6c97..0000000 --- a/.github/workflows/add_pr_to_project.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Assign pull request to project - -on: - pull_request: - types: [opened] - -jobs: - assign_to_project: - runs-on: ubuntu-latest - name: Assign pull request to project - steps: - - - name: Assign pull request to project - uses: actions/add-to-project@v0.4.0 - with: - project-url: https://github.com/orgs/koinos/projects/6 - github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }} diff --git a/.github/workflows/add_to_project.yml b/.github/workflows/add_to_project.yml deleted file mode 100644 index e9d82be..0000000 --- a/.github/workflows/add_to_project.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Auto assign to project - -on: - issues: - types: [opened] - pull_request: - types: [opened] - -jobs: - assign_to_project: - uses: koinos/koinos-github-workflows/.github/workflows/add_to_project.yml@master - secrets: - github-token: ${{ secrets.ISSUE_MANAGEMENT_TOKEN }} diff --git a/.github/workflows/close_issues.yml b/.github/workflows/close_issues.yml deleted file mode 100644 index 78ff5a3..0000000 --- a/.github/workflows/close_issues.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Close inactive issues - -on: - schedule: - - cron: "0 0 * * *" # Run every day at midnight - -jobs: - close-issues: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/stale@v7 - with: - exempt-issue-labels: "story,task,research" - days-before-stale: 30 - days-before-close: 14 - stale-issue-label: "stale" - stale-pr-label: "stale" - stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." - close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." - stale-pr-message: "This pull request is stale because it has been open for 30 days with no activity." - close-pr-message: "This pull request was closed because it has been inactive for 14 days since being marked as stale." 
- repo-token: ${{ secrets.ISSUE_MANAGEMENT_TOKEN }} From a3b052ee8ca39928b4b9be6bd94267014c256179 Mon Sep 17 00:00:00 2001 From: Michael Vandeberg Date: Mon, 11 Mar 2024 15:06:10 -0600 Subject: [PATCH 26/26] Bump koinos-cmake --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2d51628..fbf1cf5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ include(FetchContent) FetchContent_Declare( koinos_cmake GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git - GIT_TAG 897f11188f1761ddd810d77839417549e68465aa) + GIT_TAG 0c8433a118eb4cd5a86bb7ac4708a38db8166801) FetchContent_MakeAvailable(koinos_cmake) include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake")
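
Taken together, patches 22 and 26 leave the top of `CMakeLists.txt` bootstrapping koinos-cmake through `FetchContent` before `project()` is declared. A minimal sketch of that pattern, reconstructed from the hunks above (the `project()` arguments and the helper calls that follow are assumed from the earlier refactor rather than shown in these last diffs):

```
cmake_minimum_required(VERSION 3.19.0)

# Apply 3.19 policy behavior, with NEW policies up to 3.27.4 where the running CMake supports them.
cmake_policy(VERSION 3.19.0...3.27.4)

# Fetch the shared koinos-cmake module and pin it to the commit bumped in PATCH 26/26.
include(FetchContent)
FetchContent_Declare(
  koinos_cmake
  GIT_REPOSITORY https://github.com/koinos/koinos-cmake.git
  GIT_TAG 0c8433a118eb4cd5a86bb7ac4708a38db8166801)
FetchContent_MakeAvailable(koinos_cmake)

# Koinos.cmake supplies the koinos_* helpers (koinos_define_version, koinos_coverage,
# koinos_add_package, ...) used by the rest of the file.
include("${koinos_cmake_SOURCE_DIR}/Koinos.cmake")

project(koinos_state_db
  VERSION 1.1.0
  DESCRIPTION "The Koinos statedb library"
  LANGUAGES CXX C)
```

With this in place the build flow from the updated README is unchanged: configure an out-of-tree `build` directory, build the default targets, and run `state_db_tests` (or `ctest`) from `build/tests`.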