From b827e4040e69401f261288f52ed4c63400503be9 Mon Sep 17 00:00:00 2001 From: Thomas Brady Date: Wed, 18 Sep 2024 15:10:12 -0700 Subject: [PATCH] Adds --trusted-hash-file and --from-ledger to verify-checkpoints to allow for incremental verification of checkpoints. --- docs/software/commands.md | 19 ++- src/catchup/CatchupWork.cpp | 2 +- src/catchup/VerifyLedgerChainWork.cpp | 25 ++- src/catchup/VerifyLedgerChainWork.h | 5 + src/history/test/HistoryTests.cpp | 2 +- .../WriteVerifiedCheckpointHashesWork.cpp | 154 +++++++++++++++--- .../WriteVerifiedCheckpointHashesWork.h | 25 ++- src/historywork/test/HistoryWorkTests.cpp | 85 ++++++++-- src/main/CommandLine.cpp | 53 +++++- 9 files changed, 325 insertions(+), 45 deletions(-) diff --git a/docs/software/commands.md b/docs/software/commands.md index e8d8c55007..5b90f63d5b 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -209,8 +209,23 @@ apply. hash for a checkpoint ledger, and then verifies the entire earlier history of an archive that ends in that ledger hash, writing the output to a reference list of trusted checkpoint hashes. - Option **--output-filename <FILE-NAME>** is mandatory and specifies the file - to write the trusted checkpoint hashes to. + * Option **--history-hash <HISTORY-HASH>** is optional and specifies the hash of the ledger + at the end of the verification range. When provided, `stellar-core` will use the history + hash to verify the range, rather than the latest checkpoint hash obtained from consensus. + Used in conjunction with `--history-ledger`. + * Option **--history-ledger <HISTORY-LEDGER>** is optional and specifies the ledger + number to end the verification at. Used in conjunction with `--history-hash`. + * Option **--output-filename <FILE-NAME>** is mandatory and specifies the file + to write the trusted checkpoint hashes to. The file will contain a JSON array + of arrays, where each inner array contains the ledger number and the corresponding + checkpoint hash of the form `[[999, "hash-abc"], [935, "hash-def"], ... [0, "hash-xyz"]]`. + * Option **--trusted-hash-file <FILE-NAME>** is optional. If provided, + `stellar-core` will parse the latest checkpoint ledger number and hash from the file and verify from this ledger to the latest checkpoint ledger obtained from the network (see the example below). + * Option **--from-ledger <FROM-LEDGER>** is optional and specifies the ledger + number to start the verification from. + +> Note: It is an error to provide both the `--trusted-hash-file` and `--from-ledger` options. + * **version**: Print version info and then exit.
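An illustrative incremental-verification workflow using the new flags (file names are placeholders; flag spellings follow the parsers registered in `CommandLine.cpp`):

```sh
# First run: verify the whole chain back to genesis and record every
# trusted checkpoint hash.
stellar-core verify-checkpoints --conf stellar-core.cfg --output-file trusted-hashes.json

# Later runs: reuse the previous output so only checkpoints published since
# the last run are verified; the merged result atomically replaces the file.
stellar-core verify-checkpoints --conf stellar-core.cfg \
    --trusted-hash-file trusted-hashes.json --output-file trusted-hashes.json
```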
## HTTP Commands diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index 20dfe65359..2d06425863 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -177,7 +177,7 @@ CatchupWork::downloadVerifyLedgerChain(CatchupRange const& catchupRange, mVerifyLedgers = std::make_shared( mApp, *mDownloadDir, verifyRange, mLastClosedLedgerHashPair, - mRangeEndFuture, std::move(fatalFailurePromise)); + std::nullopt, mRangeEndFuture, std::move(fatalFailurePromise)); // Never retry the sequence: downloads already have retries, and there's no // point retrying verification diff --git a/src/catchup/VerifyLedgerChainWork.cpp b/src/catchup/VerifyLedgerChainWork.cpp index a5603fd1e0..14340ef523 100644 --- a/src/catchup/VerifyLedgerChainWork.cpp +++ b/src/catchup/VerifyLedgerChainWork.cpp @@ -107,6 +107,7 @@ trySetFuture(std::promise& promise, T value) VerifyLedgerChainWork::VerifyLedgerChainWork( Application& app, TmpDir const& downloadDir, LedgerRange const& range, LedgerNumHashPair const& lastClosedLedger, + std::optional const& maxPrevVerified, std::shared_future trustedMaxLedger, std::promise&& fatalFailure, std::shared_ptr outputStream) @@ -118,6 +119,7 @@ VerifyLedgerChainWork::VerifyLedgerChainWork( : mApp.getHistoryManager().checkpointContainingLedger( mRange.last())) , mLastClosed(lastClosedLedger) + , mMaxPrevVerified(maxPrevVerified) , mFatalFailurePromise(std::move(fatalFailure)) , mTrustedMaxLedger(trustedMaxLedger) , mVerifiedMinLedgerPrevFuture(mVerifiedMinLedgerPrev.get_future().share()) @@ -211,7 +213,7 @@ VerifyLedgerChainWork::verifyHistoryOfSingleCheckpoint() } // Verify ledger with local state by comparing to LCL - // When checking against LCL, see it the local node is in the bad state, + // When checking against LCL, see if the local node is in a bad state // or if the archive is in a bad state (in which case, retry) if (curr.header.ledgerSeq == mLastClosed.first) { @@ -242,6 +244,20 @@ VerifyLedgerChainWork::verifyHistoryOfSingleCheckpoint() mChainDisagreesWithLocalState = lclResult; } } + // If the curr history entry is the same ledger as our mMaxPrevVerified, + // verify that the hashes match. + if (mMaxPrevVerified && + curr.header.ledgerSeq == mMaxPrevVerified->first && + curr.hash != mMaxPrevVerified->second) + { + CLOG_ERROR(History, + "Checkpoint {} does not agree with trusted " + "checkpoint hash {}", + LedgerManager::ledgerAbbrev(curr), + LedgerManager::ledgerAbbrev(mMaxPrevVerified->first, + *mMaxPrevVerified->second)); + return HistoryManager::VERIFY_STATUS_ERR_BAD_HASH; + } if (beginCheckpoint) { @@ -365,7 +381,7 @@ VerifyLedgerChainWork::verifyHistoryOfSingleCheckpoint() } else { - // Otherwise we just finished a checkpoint _after_ than the first call + // Otherwise we just finished a checkpoint _after_ the first call // to this method and the `incoming` value we read out of // `mVerifiedAhead` should have content, because the previous call // should have saved something in `mVerifiedAhead`. @@ -420,6 +436,11 @@ VerifyLedgerChainWork::onSuccess() { for (auto const& pair : mVerifiedLedgers) { + if (mMaxPrevVerified && mMaxPrevVerified->first == pair.first) + { + // Skip writing the trusted hash to the output file. 
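+ // (WriteVerifiedCheckpointHashesWork::endOutputFile appends the rows of
+ // the trusted hash file, including this ledger, to the output, so
+ // emitting it here as well would duplicate that entry.)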
+ continue; + } (*mOutputStream) << "\n[" << pair.first << ", \"" << binToHex(*pair.second) << "\"],"; } diff --git a/src/catchup/VerifyLedgerChainWork.h b/src/catchup/VerifyLedgerChainWork.h index 53606a1774..ecbaaf0af2 100644 --- a/src/catchup/VerifyLedgerChainWork.h +++ b/src/catchup/VerifyLedgerChainWork.h @@ -26,6 +26,10 @@ class VerifyLedgerChainWork : public BasicWork LedgerRange const mRange; uint32_t mCurrCheckpoint; LedgerNumHashPair const mLastClosed; + // The max ledger number and hash that we have verified up to at some time + // in the past (or genesis if we have no previous verification). Invocations + // of VerifyLedgerChainWork will verify down to this ledger. + std::optional const mMaxPrevVerified; // Record any instance where the chain we're verifying disagrees with the // local node state. This _might_ mean we can't possibly catch up (eg. we're @@ -78,6 +82,7 @@ class VerifyLedgerChainWork : public BasicWork VerifyLedgerChainWork( Application& app, TmpDir const& downloadDir, LedgerRange const& range, LedgerNumHashPair const& lastClosedLedger, + std::optional const& maxPrevVerified, std::shared_future trustedMaxLedger, std::promise&& fatalFailure, std::shared_ptr outputStream = nullptr); diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index a6d7b69763..03aa1592f8 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -243,7 +243,7 @@ TEST_CASE("Ledger chain verification", "[ledgerheaderverification]") std::shared_future fatalFailureFuture = fataFailurePromise.get_future().share(); auto w = wm.executeWork( - tmpDir, ledgerRange, lclPair, ledgerRangeEndFuture, + tmpDir, ledgerRange, lclPair, std::nullopt, ledgerRangeEndFuture, std::move(fataFailurePromise)); REQUIRE(expectedState == w->getState()); REQUIRE(fatalFailureFuture.valid()); diff --git a/src/historywork/WriteVerifiedCheckpointHashesWork.cpp b/src/historywork/WriteVerifiedCheckpointHashesWork.cpp index 45642878b1..f1f86124eb 100644 --- a/src/historywork/WriteVerifiedCheckpointHashesWork.cpp +++ b/src/historywork/WriteVerifiedCheckpointHashesWork.cpp @@ -4,46 +4,86 @@ #include "historywork/WriteVerifiedCheckpointHashesWork.h" #include "catchup/VerifyLedgerChainWork.h" +#include "crypto/Hex.h" #include "history/HistoryManager.h" #include "historywork/BatchDownloadWork.h" #include "ledger/LedgerManager.h" #include "ledger/LedgerRange.h" #include "main/Application.h" +#include "util/Fs.h" #include "util/GlobalChecks.h" #include "util/Logging.h" #include "work/ConditionalWork.h" #include #include +#include #include namespace stellar { +LedgerNumHashPair +WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput( + std::filesystem::path const& path) +{ + if (!fs::exists(path.string())) + { + throw std::runtime_error("file not found: " + path.string()); + } + + std::ifstream in(path); + Json::Value root; + Json::Reader rdr; + if (!rdr.parse(in, root)) + { + throw std::runtime_error("failed to parse JSON input " + path.string()); + } + if (!root.isArray()) + { + throw std::runtime_error("expected top-level array in " + + path.string()); + } + if (root.size() < 2) + { + throw std::runtime_error( + "expected at least one trusted ledger, hash pair in " + + path.string()); + } + // Latest hash is the first element in the array. 
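+ // Files produced by this work list checkpoints newest-first and end with
+ // a [0, ""] terminator entry, e.g. (ledger numbers illustrative):
+ // [[127, "<hex-hash>"], [63, "<hex-hash>"], [0, ""]]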
+ auto const& jpair = root[0]; + if (!jpair.isArray() || (jpair.size() != 2)) + { + throw std::runtime_error("expecting 2-element sub-array in " + + path.string()); + } + return {jpair[0].asUInt(), hexToBin256(jpair[1].asString())}; +} Hash WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput( - uint32_t seq, std::string const& filename) + uint32_t seq, std::filesystem::path const& path) { - std::ifstream in(filename); + std::ifstream in(path); if (!in) { - throw std::runtime_error("error opening " + filename); + throw std::runtime_error("error opening " + path.string()); } Json::Value root; Json::Reader rdr; if (!rdr.parse(in, root)) { - throw std::runtime_error("failed to parse JSON input " + filename); + throw std::runtime_error("failed to parse JSON input " + path.string()); } if (!root.isArray()) { - throw std::runtime_error("expected top-level array in " + filename); + throw std::runtime_error("expected top-level array in " + + path.string()); } for (auto const& jpair : root) { if (!jpair.isArray() || (jpair.size() != 2)) { throw std::runtime_error("expecting 2-element sub-array in " + - filename); + path.string()); } if (jpair[0].asUInt() == seq) { @@ -54,8 +94,12 @@ WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput( } WriteVerifiedCheckpointHashesWork::WriteVerifiedCheckpointHashesWork( - Application& app, LedgerNumHashPair rangeEnd, std::string const& outputFile, - uint32_t nestedBatchSize, std::shared_ptr archive) + Application& app, LedgerNumHashPair rangeEnd, + std::filesystem::path const& outputFile, + std::optional const& trustedHashFile, + std::optional const& latestTrustedHashPair, + std::optional const& fromLedger, uint32_t nestedBatchSize, + std::shared_ptr archive) : BatchWork(app, "write-verified-checkpoint-hashes") , mNestedBatchSize(nestedBatchSize) , mRangeEnd(rangeEnd) @@ -63,7 +107,13 @@ WriteVerifiedCheckpointHashesWork::WriteVerifiedCheckpointHashesWork( , mRangeEndFuture(mRangeEndPromise.get_future().share()) , mCurrCheckpoint(rangeEnd.first) , mArchive(archive) - , mOutputFileName(outputFile) + , mTrustedHashPath(trustedHashFile) + , mOutputPath(outputFile) + , mTmpDir("verify-checkpoints") + , mTmpOutputPath(std::filesystem::path(mTmpDir.getName()) / + outputFile.filename()) + , mLatestTrustedHashPair(latestTrustedHashPair) + , mFromLedger(fromLedger) { mRangeEndPromise.set_value(mRangeEnd); if (mArchive) @@ -81,6 +131,14 @@ WriteVerifiedCheckpointHashesWork::~WriteVerifiedCheckpointHashesWork() bool WriteVerifiedCheckpointHashesWork::hasNext() const { + if (mFromLedger) + { + return mCurrCheckpoint >= *mFromLedger; + } + else if (mLatestTrustedHashPair) + { + return mCurrCheckpoint >= mLatestTrustedHashPair->first; + } return mCurrCheckpoint != LedgerManager::GENESIS_LEDGER_SEQ; } @@ -101,9 +159,31 @@ WriteVerifiedCheckpointHashesWork::yieldMoreWork() std::make_optional(lclHe.hash)); uint32_t const span = mNestedBatchSize * freq; uint32_t const last = mCurrCheckpoint; - uint32_t const first = - last <= span ? LedgerManager::GENESIS_LEDGER_SEQ - : hm.firstLedgerInCheckpointContaining(last - span); + uint32_t first = last <= span + ? LedgerManager::GENESIS_LEDGER_SEQ + : hm.firstLedgerInCheckpointContaining(last - span); + // If the first ledger in the range is less than mFromLedger then the + // range should be constrained to start at mFromLedger, or the checkpoint + // immediately before it if mFromLedger is not a checkpoint boundary. 
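+ // For example, assuming the default 64-ledger checkpoint frequency: with
+ // --from-ledger 100, ledger 100 is not the last ledger of its checkpoint
+ // (those end at 63, 127, 191, ...), so `first` is moved back to ledger
+ // 63, the last ledger of the preceding checkpoint.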
+ if (mFromLedger && first < *mFromLedger) + { + if (hm.isLastLedgerInCheckpoint(*mFromLedger)) + { + first = *mFromLedger; + } + else + { + first = hm.lastLedgerBeforeCheckpointContaining(*mFromLedger); + } + releaseAssertOrThrow(first <= *mFromLedger); + } + // If the latest trusted ledger is greater than the first + // ledger in the range then the range should start at the trusted ledger. + else if (mLatestTrustedHashPair && first < mLatestTrustedHashPair->first) + { + first = mLatestTrustedHashPair->first; + releaseAssertOrThrow(hm.isLastLedgerInCheckpoint(first)); + } LedgerRange const ledgerRange = LedgerRange::inclusive(first, last); CheckpointRange const checkpointRange(ledgerRange, hm); @@ -139,8 +219,8 @@ WriteVerifiedCheckpointHashesWork::yieldMoreWork() : mRangeEndFuture); auto currWork = std::make_shared( - mApp, *tmpDir, ledgerRange, lcl, prevTrusted, std::promise(), - mOutputFile); + mApp, *tmpDir, ledgerRange, lcl, mLatestTrustedHashPair, prevTrusted, + std::promise(), mOutputFile); auto prevWork = mPrevVerifyWork; auto predicate = [prevWork](Application&) { if (!prevWork) @@ -169,11 +249,11 @@ WriteVerifiedCheckpointHashesWork::startOutputFile() { releaseAssert(!mOutputFile); auto mode = std::ios::out | std::ios::trunc; - mOutputFile = std::make_shared(mOutputFileName, mode); + mOutputFile = std::make_shared(mTmpOutputPath, mode); if (!*mOutputFile) { throw std::runtime_error("error opening output file " + - mOutputFileName); + mTmpOutputPath.string()); } (*mOutputFile) << "["; } @@ -183,13 +263,45 @@ WriteVerifiedCheckpointHashesWork::endOutputFile() { if (mOutputFile && mOutputFile->is_open()) { - // Each line of output made by a VerifyLedgerChainWork has a trailing - // comma, and trailing commas are not a valid end of a JSON array; so we - // terminate the array here with an entry that does _not_ have a - // trailing comma (and identifies an invalid ledger number anyways). - (*mOutputFile) << "\n[0, \"\"]\n]\n"; + if (mTrustedHashPath) + { + if (!fs::exists(mTrustedHashPath->string())) + { + throw std::runtime_error("failed to open trusted hash file " + + mTrustedHashPath->string()); + } + // Append everything except the first line of the trusted hash file + // to mOutputFile. + std::ifstream trustedHashFile(*mTrustedHashPath); + if (trustedHashFile) + { + std::string line; + // Ignore the first line ("[") + std::getline(trustedHashFile, line); + // Append the rest of the lines to mOutputFile. + while (std::getline(trustedHashFile, line)) + { + (*mOutputFile) << "\n" << line; + } + trustedHashFile.close(); + } + } + else + { + // Each line of output made by a VerifyLedgerChainWork has a + // trailing comma, and trailing commas are not a valid end of a JSON + // array; so we terminate the array here with an entry that does + // _not_ have a trailing comma (and identifies an invalid ledger + // number anyways). + (*mOutputFile) << "\n[0, \"\"]\n]\n"; + } mOutputFile->close(); mOutputFile.reset(); + + // The output file was written to a temporary file, so rename it to + // the output path provided by the user.
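+ // durableRename is used (rather than a plain rename) so the swap is
+ // crash-safe: an interrupted run should never leave a truncated file at
+ // mOutputPath, which matters when the same path is fed back in via
+ // --trusted-hash-file on the next run.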
+ fs::durableRename(mTmpOutputPath.string(), mOutputPath.string(), + mOutputPath.relative_path().string()); } } diff --git a/src/historywork/WriteVerifiedCheckpointHashesWork.h b/src/historywork/WriteVerifiedCheckpointHashesWork.h index a1eefcb3d7..038bbb6c16 100644 --- a/src/historywork/WriteVerifiedCheckpointHashesWork.h +++ b/src/historywork/WriteVerifiedCheckpointHashesWork.h @@ -5,7 +5,9 @@ #pragma once #include "ledger/LedgerRange.h" +#include "util/TmpDir.h" #include "work/BatchWork.h" +#include #include #include @@ -27,18 +29,26 @@ class WriteVerifiedCheckpointHashesWork : public BatchWork public: WriteVerifiedCheckpointHashesWork( Application& app, LedgerNumHashPair rangeEnd, - std::string const& outputFile, + std::filesystem::path const& outputFile, + std::optional const& trustedHashFile, + std::optional const& latestTrustedHashPair, + std::optional const& fromLedger, uint32_t nestedBatchSize = NESTED_DOWNLOAD_BATCH_SIZE, std::shared_ptr archive = nullptr); ~WriteVerifiedCheckpointHashesWork(); // Helper to load a hash back from a file produced by this class. static Hash loadHashFromJsonOutput(uint32_t seq, - std::string const& filename); + std::filesystem::path const& path); + // Helper to load the latest (ledger, hash) pair back from a file produced + // by this class. Throws if the file does not exist or cannot be parsed. + static LedgerNumHashPair + loadLatestHashPairFromJsonOutput(std::filesystem::path const& path); void onSuccess() override; private: + void maybeParseTrustedHashFile(); // This class is a batch work, but it also creates a conditional dependency // chain among its batch elements (for trusted ledger propagation): this // dependency chain can in turn cause the BatchWork logic to stall, failing @@ -78,6 +88,15 @@ class WriteVerifiedCheckpointHashesWork : public BatchWork void startOutputFile(); void endOutputFile(); std::shared_ptr mOutputFile; - std::string mOutputFileName; + std::optional const mTrustedHashPath; + std::filesystem::path mOutputPath; + TmpDir mTmpDir; + std::filesystem::path mTmpOutputPath; + // If true, mOutputPath == mTrustedHashPath, and output + // will be written to a temporary file before being renamed to + // mOutputPath when verification is complete.
+ bool mAppendToFile = false; + std::optional mLatestTrustedHashPair; + std::optional const mFromLedger; }; } diff --git a/src/historywork/test/HistoryWorkTests.cpp b/src/historywork/test/HistoryWorkTests.cpp index 8cade8ec85..b297539d92 100644 --- a/src/historywork/test/HistoryWorkTests.cpp +++ b/src/historywork/test/HistoryWorkTests.cpp @@ -14,6 +14,8 @@ #include #include +#include + using namespace stellar; using namespace historytestutils; @@ -31,22 +33,85 @@ TEST_CASE("write verified checkpoint hashes", "[historywork]") LedgerNumHashPair pair = pairs.back(); auto tmpDir = catchupSimulation.getApp().getTmpDirManager().tmpDir( "write-checkpoint-hashes-test"); - auto file = tmpDir.getName() + "/verified-ledgers.json"; + std::string file = tmpDir.getName() + "/verified-ledgers.json"; auto& wm = catchupSimulation.getApp().getWorkScheduler(); + std::optional noFromLedger = std::nullopt; + std::optional noVerifiedLedgerFile = std::nullopt; + std::optional noLatestTrustedHashPair = std::nullopt; + + size_t startingPairIdx = 0; { - auto w = wm.executeWork( - pair, file, nestedBatchSize); - REQUIRE(w->getState() == BasicWork::State::WORK_SUCCESS); + SECTION("from genesis") + { + auto w = wm.executeWork( + pair, file, noVerifiedLedgerFile, noLatestTrustedHashPair, + noFromLedger, nestedBatchSize); + REQUIRE(w->getState() == BasicWork::State::WORK_SUCCESS); + } + SECTION("from specified ledger") + { + startingPairIdx = 1; + std::optional fromLedger = + pairs[startingPairIdx].first; + auto w = wm.executeWork( + pair, file, noVerifiedLedgerFile, noLatestTrustedHashPair, + fromLedger, nestedBatchSize); + REQUIRE(w->getState() == BasicWork::State::WORK_SUCCESS); + } } - for (auto const& p : pairs) + auto checkFileContents = [](const auto& pairs, auto startingPairIdx, + std::string file) { + for (size_t i = 0; i < pairs.size(); ++i) + { + auto p = pairs[i]; + LOG_DEBUG(DEFAULT_LOG, "Verified {} with hash {}", p.first, + hexAbbrev(*p.second)); + Hash h = WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput( + p.first, file); + // If we did not start from the beginning, the hashes before the + // starting pair should not be in the file. + if (i < startingPairIdx) + { + REQUIRE(h == Hash{}); + } + else + { + REQUIRE(h == *p.second); + } + } + // Check that the "latest" ledger in the file is the same as the last + // pair in the pairs vector. + auto latest = + WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput( + file); + REQUIRE(latest.first == pairs.back().first); + }; + + checkFileContents(pairs, startingPairIdx, file); + + // Advance the simulation. + auto secondCheckpointLedger = + catchupSimulation.getLastCheckpointLedger(10 * nestedBatchSize); + catchupSimulation.ensureOnlineCatchupPossible(secondCheckpointLedger, + 5 * nestedBatchSize); + pairs = catchupSimulation.getAllPublishedCheckpoints(); + + std::optional trustedHashFile = file; + std::optional latestTrustedHashPair = + WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput( + file); + file += ".new"; + // Run work again with existing file. { - LOG_DEBUG(DEFAULT_LOG, "Verified {} with hash {}", p.first, - hexAbbrev(*p.second)); - Hash h = WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput( - p.first, file); - REQUIRE(h == *p.second); + auto w = wm.executeWork( + pairs.back(), file, trustedHashFile, latestTrustedHashPair, + noFromLedger, nestedBatchSize); + REQUIRE(w->getState() == BasicWork::State::WORK_SUCCESS); } + + // Ensure the file contains all pairs, from the first run and the second. 
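+ // Illustrative expected layout of the merged file (newest first): the
+ // checkpoints verified in the second run, then the rows carried over from
+ // the first run's file, ending with the same [0, ""] terminator row.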
+ checkFileContents(pairs, startingPairIdx, file); } TEST_CASE("check single ledger header work", "[historywork]") diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index a59df33fde..51cbc4b814 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -205,6 +205,15 @@ outputFileParser(std::string& string) return clara::Opt{string, "FILE-NAME"}["--output-file"]("output file"); } +clara::Opt +trustedHashFileParser(std::optional& string) +{ + return clara::Opt{[&](std::string const& arg) { string = arg; }, + "FILE-NAME"}["--trusted-hash-file"]( + "file containing trusted hashes, generated by a previous call to " + "verify-checkpoints"); +} + clara::Opt outputDirParser(std::string& string) { @@ -292,6 +301,15 @@ historyLedgerNumber(uint32_t& ledgerNum) "specify a ledger number to examine in history"); } +clara::Opt +fromLedgerNumberParser(std::optional& fromLedgerNum) +{ + return clara::Opt{ + [&](std::string const& arg) { fromLedgerNum = std::stoul(arg); }, + "FROM-LEDGER"}["--from-ledger"]( + "specify a ledger number to start from"); +} + clara::Opt historyHashParser(std::string& hash) { @@ -964,20 +982,44 @@ int runWriteVerifiedCheckpointHashes(CommandLineArgs const& args) { std::string outputFile; + std::optional trustedHashFile; uint32_t startLedger = 0; std::string startHash; + std::optional fromLedger; CommandLine::ConfigOption configOption; return runWithHelp( args, {configurationParser(configOption), historyLedgerNumber(startLedger), - historyHashParser(startHash), outputFileParser(outputFile).required()}, + historyHashParser(startHash), fromLedgerNumberParser(fromLedger), + trustedHashFileParser(trustedHashFile), + outputFileParser(outputFile).required()}, [&] { + if (outputFile.empty()) + { + LOG_ERROR(DEFAULT_LOG, "Must specify --output-file"); + return 1; + } + if (fromLedger && trustedHashFile) + { + LOG_ERROR(DEFAULT_LOG, "Cannot specify both --from-ledger and " + "--trusted-hash-file"); + return 1; + } + std::optional latestTrustedHashPair; + if (trustedHashFile) + { + // Parse the latest hash from the trusted hash file before + // starting the application and connecting to the network so + // that we can exit early if there is malformed input. + latestTrustedHashPair.emplace( + WriteVerifiedCheckpointHashesWork:: + loadLatestHashPairFromJsonOutput(*trustedHashFile)); + } VirtualClock clock(VirtualClock::REAL_TIME); auto cfg = configOption.getConfig(); - // Set up for quick in-memory no-catchup mode. + // Set up for quick no-catchup mode. cfg.QUORUM_INTERSECTION_CHECKER = false; - cfg.setInMemoryMode(); cfg.MODE_DOES_CATCHUP = false; auto app = Application::create(clock, cfg, false); @@ -998,8 +1040,9 @@ runWriteVerifiedCheckpointHashes(CommandLineArgs const& args) app->getOverlayManager().shutdown(); app->getHerder().shutdown(); app->getWorkScheduler() - .executeWork(authPair, - outputFile); + .executeWork( + authPair, outputFile, trustedHashFile, + latestTrustedHashPair, fromLedger); app->gracefulStop(); return 0; }