From f7c0f1032ba4509ad0ac22912ddfe7f9098d5481 Mon Sep 17 00:00:00 2001 From: Danyal Prout Date: Thu, 30 May 2024 09:16:44 -0500 Subject: [PATCH 01/11] Add version handler for api --- Dockerfile | 2 +- api/Makefile | 3 +++ api/service/api.go | 13 +++++++++++++ api/service/api_test.go | 18 ++++++++++++++++++ api/version/version.go | 29 +++++++++++++++++++++++++++++ docker-compose.yml | 2 ++ 6 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 api/version/version.go diff --git a/Dockerfile b/Dockerfile index 36b0e30..b5d2cd3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM golang:1.21.6-alpine3.19 as builder -RUN apk add --no-cache make gcc musl-dev linux-headers jq bash +RUN apk add --no-cache make gcc musl-dev linux-headers jq bash git WORKDIR /app diff --git a/api/Makefile b/api/Makefile index e809280..3dd0753 100644 --- a/api/Makefile +++ b/api/Makefile @@ -1,3 +1,6 @@ +GITCOMMIT ?= $(shell git rev-parse HEAD) +LDFLAGS := -ldflags "-X github.com/base-org/blob-archiver/api/version.GitCommit=$(GITCOMMIT)" + blob-api: env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/blob-api ./cmd/main.go diff --git a/api/service/api.go b/api/service/api.go index b5e247d..07a8a53 100644 --- a/api/service/api.go +++ b/api/service/api.go @@ -15,6 +15,7 @@ import ( "github.com/attestantio/go-eth2-client/api" "github.com/attestantio/go-eth2-client/spec/deneb" m "github.com/base-org/blob-archiver/api/metrics" + "github.com/base-org/blob-archiver/api/version" "github.com/base-org/blob-archiver/common/storage" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum/go-ethereum/common" @@ -106,6 +107,7 @@ func NewAPI(dataStoreClient storage.DataStoreReader, beaconClient client.BeaconB }) r.Get("/eth/v1/beacon/blob_sidecars/{id}", result.blobSidecarHandler) + r.Get("/eth/v1/node/version", result.versionHandler) return result } @@ -128,6 +130,17 @@ func isKnownIdentifier(id string) bool { return slices.Contains([]string{"genesis", "finalized", "head"}, id) } +// versionHandler implements the /eth/v1/node/version endpoint. +func (a *API) versionHandler(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", jsonAcceptType) + w.WriteHeader(http.StatusOK) + err := json.NewEncoder(w).Encode(version.APIVersion) + if err != nil { + a.logger.Error("unable to encode version to JSON", "err", err) + errServerError.write(w) + } +} + // toBeaconBlockHash converts a string that can be a slot, hash or identifier to a beacon block hash. 
func (a *API) toBeaconBlockHash(id string) (common.Hash, *httpError) { if isHash(id) { diff --git a/api/service/api_test.go b/api/service/api_test.go index 19b4aaf..d8bc0fd 100644 --- a/api/service/api_test.go +++ b/api/service/api_test.go @@ -15,6 +15,7 @@ import ( "github.com/attestantio/go-eth2-client/spec/deneb" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/base-org/blob-archiver/api/metrics" + "github.com/base-org/blob-archiver/api/version" "github.com/base-org/blob-archiver/common/beacon/beacontest" "github.com/base-org/blob-archiver/common/blobtest" "github.com/base-org/blob-archiver/common/storage" @@ -303,6 +304,23 @@ func TestAPIService(t *testing.T) { } } +func TestVersionHandler(t *testing.T) { + a, _, _, cleanup := setup(t) + defer cleanup() + + request := httptest.NewRequest("GET", "/eth/v1/node/version", nil) + response := httptest.NewRecorder() + + a.router.ServeHTTP(response, request) + + require.Equal(t, 200, response.Code) + require.Equal(t, "application/json", response.Header().Get("Content-Type")) + var v version.Version + err := json.Unmarshal(response.Body.Bytes(), &v) + require.NoError(t, err) + require.Equal(t, "Blob Archiver API/unknown", v.Data.Version) +} + func TestHealthHandler(t *testing.T) { a, _, _, cleanup := setup(t) defer cleanup() diff --git a/api/version/version.go b/api/version/version.go new file mode 100644 index 0000000..7c6fb80 --- /dev/null +++ b/api/version/version.go @@ -0,0 +1,29 @@ +package version + +import "fmt" + +var ( + GitCommit = "" + APIVersion Version +) + +func init() { + commit := GitCommit + if commit == "" { + commit = "unknown" + } + + APIVersion = Version{ + Data: struct { + Version string `json:"version"` + }{ + Version: fmt.Sprintf("Blob Archiver API/%s", commit), + }, + } +} + +type Version struct { + Data struct { + Version string `json:"version"` + } `json:"data"` +} diff --git a/docker-compose.yml b/docker-compose.yml index 9fba628..1a15598 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,6 +7,8 @@ services: dockerfile: Dockerfile env_file: - .env + ports: + - "8000:8000" command: - "blob-api" depends_on: From 3c824be7c23590674091f95ff061a11b63d74fdc Mon Sep 17 00:00:00 2001 From: Danyal Prout Date: Thu, 30 May 2024 10:45:52 -0500 Subject: [PATCH 02/11] Bumps deps + migrate to op-stack types --- api/service/api_test.go | 4 +- api/version/version.go | 20 ++++----- go.mod | 38 ++++++++--------- go.sum | 90 ++++++++++++++++++----------------------- 4 files changed, 67 insertions(+), 85 deletions(-) diff --git a/api/service/api_test.go b/api/service/api_test.go index d8bc0fd..6619e6a 100644 --- a/api/service/api_test.go +++ b/api/service/api_test.go @@ -15,10 +15,10 @@ import ( "github.com/attestantio/go-eth2-client/spec/deneb" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/base-org/blob-archiver/api/metrics" - "github.com/base-org/blob-archiver/api/version" "github.com/base-org/blob-archiver/common/beacon/beacontest" "github.com/base-org/blob-archiver/common/blobtest" "github.com/base-org/blob-archiver/common/storage" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -315,7 +315,7 @@ func TestVersionHandler(t *testing.T) { require.Equal(t, 200, response.Code) require.Equal(t, "application/json", response.Header().Get("Content-Type")) - var v version.Version + var v eth.APIVersionResponse err := json.Unmarshal(response.Body.Bytes(), 
&v) require.NoError(t, err) require.Equal(t, "Blob Archiver API/unknown", v.Data.Version) diff --git a/api/version/version.go b/api/version/version.go index 7c6fb80..8426f54 100644 --- a/api/version/version.go +++ b/api/version/version.go @@ -1,10 +1,14 @@ package version -import "fmt" +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) var ( GitCommit = "" - APIVersion Version + APIVersion eth.APIVersionResponse ) func init() { @@ -13,17 +17,9 @@ func init() { commit = "unknown" } - APIVersion = Version{ - Data: struct { - Version string `json:"version"` - }{ + APIVersion = eth.APIVersionResponse{ + Data: eth.VersionInformation{ Version: fmt.Sprintf("Blob Archiver API/%s", commit), }, } } - -type Version struct { - Data struct { - Version string `json:"version"` - } `json:"data"` -} diff --git a/go.mod b/go.mod index 7e86467..d847843 100644 --- a/go.mod +++ b/go.mod @@ -4,10 +4,10 @@ go 1.21.6 require ( github.com/attestantio/go-eth2-client v0.21.1 - github.com/ethereum-optimism/optimism v1.7.2 - github.com/ethereum/go-ethereum v1.13.8 + github.com/ethereum-optimism/optimism v1.7.6 + github.com/ethereum/go-ethereum v1.101315.1 github.com/go-chi/chi/v5 v5.0.12 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.70 github.com/prometheus/client_golang v1.19.0 github.com/rs/zerolog v1.32.0 github.com/stretchr/testify v1.9.0 @@ -17,7 +17,6 @@ require ( require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect @@ -34,9 +33,9 @@ require ( github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240318114348-52d3dbd1605d // indirect + github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/ferranbt/fastssz v0.1.3 // indirect @@ -45,16 +44,16 @@ require ( github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect github.com/goccy/go-yaml v1.9.2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/go-clone v1.6.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect @@ -65,8 +64,6 @@ require ( github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect 
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -80,25 +77,24 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/sirupsen/logrus v1.9.3 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opentelemetry.io/otel v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/tools v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect @@ -108,4 +104,4 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum v1.13.8 => github.com/ethereum-optimism/op-geth v1.101308.3-rc.1 +replace github.com/ethereum/go-ethereum v1.101315.1 => github.com/ethereum-optimism/op-geth v1.101315.1 diff --git a/go.sum b/go.sum index 74ab11b..45153a6 100644 --- a/go.sum +++ b/go.sum @@ -4,9 +4,6 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= -github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/attestantio/go-eth2-client v0.21.1 h1:yvsMd/azPUbxiJzWZhgqfOJJRNF1zLvAJpcBXTHzyh8= github.com/attestantio/go-eth2-client v0.21.1/go.mod h1:Tb412NpzhsC0sbtpXS4D51y5se6nDkWAi6amsJrqX9c= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -54,16 +51,16 @@ github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6 github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 
v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/ethereum-optimism/op-geth v1.101308.3-rc.1 h1:mC8PrDNTZJr7sYcm+FgaWFUy/LOZ0sS8+BxkglUoPkg= -github.com/ethereum-optimism/op-geth v1.101308.3-rc.1/go.mod h1:k0UbrLuOITLD8goCyA2xWebAL03n2BZUCfwos0rxz60= -github.com/ethereum-optimism/optimism v1.7.2 h1:9S1Qi9Ns4eGuFtfpIG4OnX/CDzA0dx4mHGIv5oaYeEw= -github.com/ethereum-optimism/optimism v1.7.2/go.mod h1:ZwYKTK1oLHPnVsNvGbVXCn2FNwdacfizPAAEDHVh6Ck= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240318114348-52d3dbd1605d h1:K7HdD/ZAcSFhcqqnUAbvU+8vsg0DzL8pvetHw5vRLCc= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240318114348-52d3dbd1605d/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= +github.com/ethereum-optimism/op-geth v1.101315.1 h1:GhHlJ60h652XbRkp/MyWP355y+imNWjPm7hWlnG6+Fc= +github.com/ethereum-optimism/op-geth v1.101315.1/go.mod h1:8tQ6r0e1NNJbSVHzYKafQqf62gV9BzZR+SKkXRckjLM= +github.com/ethereum-optimism/optimism v1.7.6 h1:iwbO47lwa6vi5gQA0Lbnf/uOzmqXFHvXgmziLtVMbwM= +github.com/ethereum-optimism/optimism v1.7.6/go.mod h1:0zhgYDWSk2ZgzFkhA4ENcWRvS0EuO9IUQAhXenvtSZM= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95 h1:GjXKQg6u6WkEIcY0dvW2IKhMRY8cVjwdw+rNKhduAo8= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -71,8 +68,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= -github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ= -github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= +github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= @@ -107,6 +104,8 @@ github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod 
h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-yaml v1.9.2 h1:2Njwzw+0+pjU2gb805ZC1B/uBuAs2VcZ3K+ZgHwDs7w= github.com/goccy/go-yaml v1.9.2/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -134,19 +133,18 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 h1:Ep/joEub9YwcjRY6ND3+Y/w0ncE540RtGatVhtZL0/Q= github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/hashicorp/go-bexpr v0.1.11 h1:6DqdA/KBjurGby9yTY0bmkathya0lfwF2SeuubCI7dY= github.com/hashicorp/go-bexpr v0.1.11/go.mod h1:f03lAo0duBlDIUMGCuad8oLcgejw4m7U+N8T+6Kz1AE= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= @@ -163,12 +161,10 @@ github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFck github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -196,8 +192,8 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g= +github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -207,14 +203,10 @@ github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -264,12 +256,11 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -294,8 +285,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= @@ -305,10 +296,10 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.14.0 
h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -323,8 +314,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -350,7 +341,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -358,19 +348,19 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -378,8 +368,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From e6b9281baeb34fd51532689b459ea35cbe58cade Mon Sep 17 00:00:00 2001 From: Ino Murko Date: Thu, 30 May 2024 15:35:38 +0000 Subject: [PATCH 03/11] able to pass in path in bucket and trim hash --- archiver/flags/config.go | 8 +++++--- common/flags/config.go | 2 ++ common/flags/flags.go | 7 +++++++ common/storage/s3.go | 8 +++++--- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/archiver/flags/config.go b/archiver/flags/config.go index d680e6e..a14709e 100644 --- a/archiver/flags/config.go +++ b/archiver/flags/config.go @@ -3,7 +3,8 @@ package flags import ( "fmt" "time" - + "strings" + common "github.com/base-org/blob-archiver/common/flags" oplog "github.com/ethereum-optimism/optimism/op-service/log" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" @@ -35,7 +36,7 @@ func (c ArchiverConfig) Check() error { } if c.OriginBlock == (geth.Hash{}) { - return fmt.Errorf("invalid origin block") + return fmt.Errorf("invalid origin block %s", c.OriginBlock) } if c.ListenAddr == "" { @@ -47,13 +48,14 @@ func (c ArchiverConfig) Check() error { func ReadConfig(cliCtx *cli.Context) ArchiverConfig { pollInterval, _ := time.ParseDuration(cliCtx.String(ArchiverPollIntervalFlag.Name)) + return ArchiverConfig{ LogConfig: oplog.ReadCLIConfig(cliCtx), MetricsConfig: opmetrics.ReadCLIConfig(cliCtx), BeaconConfig: common.NewBeaconConfig(cliCtx), StorageConfig: common.NewStorageConfig(cliCtx), PollInterval: pollInterval, - OriginBlock: geth.HexToHash(cliCtx.String(ArchiverOriginBlock.Name)), + OriginBlock: geth.HexToHash(strings.Trim(cliCtx.String(ArchiverOriginBlock.Name), "\"")), ListenAddr: cliCtx.String(ArchiverListenAddrFlag.Name), } } diff --git a/common/flags/config.go b/common/flags/config.go index 369f3a3..906379c 100644 --- a/common/flags/config.go +++ b/common/flags/config.go @@ -24,6 +24,7 @@ type S3Config struct { Endpoint string UseHttps bool Bucket string + Path string S3CredentialType S3CredentialType AccessKey 
string @@ -106,6 +107,7 @@ func readS3Config(ctx *cli.Context) S3Config { SecretAccessKey: ctx.String(S3SecretAccessKeyFlagName), UseHttps: ctx.Bool(S3EndpointHttpsFlagName), Bucket: ctx.String(S3BucketFlagName), + Path: ctx.String(S3PathFlagName), S3CredentialType: toS3CredentialType(ctx.String(S3CredentialTypeFlagName)), Compress: ctx.Bool(S3CompressFlagName), } diff --git a/common/flags/flags.go b/common/flags/flags.go index a1f2c48..1829700 100644 --- a/common/flags/flags.go +++ b/common/flags/flags.go @@ -17,6 +17,7 @@ const ( S3AccessKeyFlagName = "s3-access-key" S3SecretAccessKeyFlagName = "s3-secret-access-key" S3BucketFlagName = "s3-bucket" + S3PathFlagName = "s3-path" FileStorageDirectoryFlagName = "file-directory" ) @@ -77,6 +78,12 @@ func CLIFlags(envPrefix string) []cli.Flag { Hidden: true, EnvVars: opservice.PrefixEnvVar(envPrefix, "S3_BUCKET"), }, + &cli.StringFlag{ + Name: S3PathFlagName, + Usage: "The path to append to file", + Hidden: true, + EnvVars: opservice.PrefixEnvVar(envPrefix, "S3_PATH"), + }, // File Data Store Flags &cli.StringFlag{ Name: FileStorageDirectoryFlagName, diff --git a/common/storage/s3.go b/common/storage/s3.go index 6be01fc..6263b36 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -17,6 +17,7 @@ import ( type S3Storage struct { s3 *minio.Client bucket string + path string log log.Logger compress bool } @@ -41,13 +42,14 @@ func NewS3Storage(cfg flags.S3Config, l log.Logger) (*S3Storage, error) { return &S3Storage{ s3: client, bucket: cfg.Bucket, + path: cfg.Path, log: l, compress: cfg.Compress, }, nil } func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) { - _, err := s.s3.StatObject(ctx, s.bucket, hash.String(), minio.StatObjectOptions{}) + _, err := s.s3.StatObject(ctx, s.bucket, s.path+hash.String(), minio.StatObjectOptions{}) if err != nil { errResponse := minio.ToErrorResponse(err) if errResponse.Code == "NoSuchKey" { @@ -61,7 +63,7 @@ func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) } func (s *S3Storage) Read(ctx context.Context, hash common.Hash) (BlobData, error) { - res, err := s.s3.GetObject(ctx, s.bucket, hash.String(), minio.GetObjectOptions{}) + res, err := s.s3.GetObject(ctx, s.bucket, s.path+hash.String(), minio.GetObjectOptions{}) if err != nil { s.log.Info("unexpected error fetching blob", "hash", hash.String(), "err", err) return BlobData{}, ErrStorage @@ -122,7 +124,7 @@ func (s *S3Storage) Write(ctx context.Context, data BlobData) error { reader := bytes.NewReader(b) - _, err = s.s3.PutObject(ctx, s.bucket, data.Header.BeaconBlockHash.String(), reader, int64(len(b)), options) + _, err = s.s3.PutObject(ctx, s.bucket, s.path+data.Header.BeaconBlockHash.String(), reader, int64(len(b)), options) if err != nil { s.log.Warn("error writing blob", "err", err) From 389c0529967cf5f2f139994391beeebc9b6e2711 Mon Sep 17 00:00:00 2001 From: Ino Murko Date: Fri, 31 May 2024 10:22:07 +0000 Subject: [PATCH 04/11] default path, replace deprecated docker command --- Makefile | 6 +++--- README.md | 2 +- archiver/flags/config.go | 5 ++--- common/flags/flags.go | 1 + common/storage/s3.go | 9 +++++---- common/storage/s3_test.go | 4 ++-- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 6cbd8bc..53a3699 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ build: .PHONY: build build-docker: - docker-compose build + docker compose build .PHONY: build-docker clean: @@ -21,8 +21,8 @@ test: .PHONY: test integration: - docker-compose down - 
docker-compose up -d minio create-buckets + docker compose down + docker compose up -d minio create-buckets RUN_INTEGRATION_TESTS=true go test -v ./... .PHONY: integration diff --git a/README.md b/README.md index fe41cc3..d879f65 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ To run the project locally, you should first copy `.env.template` to `.env` and to your beacon client and storage backend of choice. Then you can run the project with: ```sh -docker-compose up +docker compose up ``` You can see a full list of configuration options by running: diff --git a/archiver/flags/config.go b/archiver/flags/config.go index a14709e..e0d2f32 100644 --- a/archiver/flags/config.go +++ b/archiver/flags/config.go @@ -2,9 +2,9 @@ package flags import ( "fmt" - "time" "strings" - + "time" + common "github.com/base-org/blob-archiver/common/flags" oplog "github.com/ethereum-optimism/optimism/op-service/log" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" @@ -48,7 +48,6 @@ func (c ArchiverConfig) Check() error { func ReadConfig(cliCtx *cli.Context) ArchiverConfig { pollInterval, _ := time.ParseDuration(cliCtx.String(ArchiverPollIntervalFlag.Name)) - return ArchiverConfig{ LogConfig: oplog.ReadCLIConfig(cliCtx), MetricsConfig: opmetrics.ReadCLIConfig(cliCtx), diff --git a/common/flags/flags.go b/common/flags/flags.go index 1829700..d41eb90 100644 --- a/common/flags/flags.go +++ b/common/flags/flags.go @@ -83,6 +83,7 @@ func CLIFlags(envPrefix string) []cli.Flag { Usage: "The path to append to file", Hidden: true, EnvVars: opservice.PrefixEnvVar(envPrefix, "S3_PATH"), + Value: "", }, // File Data Store Flags &cli.StringFlag{ diff --git a/common/storage/s3.go b/common/storage/s3.go index 6263b36..bf68e92 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "io" + "path" "github.com/base-org/blob-archiver/common/flags" "github.com/ethereum/go-ethereum/common" @@ -17,7 +18,7 @@ import ( type S3Storage struct { s3 *minio.Client bucket string - path string + path string log log.Logger compress bool } @@ -49,7 +50,7 @@ func NewS3Storage(cfg flags.S3Config, l log.Logger) (*S3Storage, error) { } func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) { - _, err := s.s3.StatObject(ctx, s.bucket, s.path+hash.String(), minio.StatObjectOptions{}) + _, err := s.s3.StatObject(ctx, s.bucket, path.Join(s.path, hash.String()), minio.StatObjectOptions{}) if err != nil { errResponse := minio.ToErrorResponse(err) if errResponse.Code == "NoSuchKey" { @@ -63,7 +64,7 @@ func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) } func (s *S3Storage) Read(ctx context.Context, hash common.Hash) (BlobData, error) { - res, err := s.s3.GetObject(ctx, s.bucket, s.path+hash.String(), minio.GetObjectOptions{}) + res, err := s.s3.GetObject(ctx, s.bucket, path.Join(s.path, hash.String()), minio.GetObjectOptions{}) if err != nil { s.log.Info("unexpected error fetching blob", "hash", hash.String(), "err", err) return BlobData{}, ErrStorage @@ -124,7 +125,7 @@ func (s *S3Storage) Write(ctx context.Context, data BlobData) error { reader := bytes.NewReader(b) - _, err = s.s3.PutObject(ctx, s.bucket, s.path+data.Header.BeaconBlockHash.String(), reader, int64(len(b)), options) + _, err = s.s3.PutObject(ctx, s.bucket, path.Join(s.path, data.Header.BeaconBlockHash.String()), reader, int64(len(b)), options) if err != nil { s.log.Warn("error writing blob", "err", err) diff --git a/common/storage/s3_test.go 
b/common/storage/s3_test.go index 3a233a2..e1f6753 100644 --- a/common/storage/s3_test.go +++ b/common/storage/s3_test.go @@ -14,8 +14,8 @@ import ( // Prior to running these tests, a local Minio server must be running. // You can accomplish this with: -// docker-compose down # shut down any running services -// docker-compose up minio create-buckets # start the minio service +// docker compose down # shut down any running services +// docker compose up minio create-buckets # start the minio service func setupS3(t *testing.T) *S3Storage { if os.Getenv("RUN_INTEGRATION_TESTS") == "" { t.Skip("skipping integration tests: set RUN_INTEGRATION_TESTS environment variable") From 17c8a8adccba7c6a064c7ec5744db88da15279f6 Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Mon, 17 Jun 2024 09:16:40 -0400 Subject: [PATCH 05/11] Store backfill_processes status for protection against interruptions --- api/service/api.go | 2 +- api/service/api_test.go | 4 +- archiver/service/archiver.go | 78 +++++++++++++++++++-------- archiver/service/archiver_test.go | 87 ++++++++++++++++++++++++++++-- common/blobtest/helpers.go | 2 + common/storage/file.go | 57 ++++++++++++++++++-- common/storage/file_test.go | 14 ++--- common/storage/s3.go | 87 ++++++++++++++++++++++++++++-- common/storage/storage.go | 24 +++++++-- common/storage/storagetest/stub.go | 8 +-- 10 files changed, 312 insertions(+), 51 deletions(-) diff --git a/api/service/api.go b/api/service/api.go index 07a8a53..d5a7059 100644 --- a/api/service/api.go +++ b/api/service/api.go @@ -179,7 +179,7 @@ func (a *API) blobSidecarHandler(w http.ResponseWriter, r *http.Request) { return } - result, storageErr := a.dataStoreClient.Read(r.Context(), beaconBlockHash) + result, storageErr := a.dataStoreClient.ReadBlob(r.Context(), beaconBlockHash) if storageErr != nil { if errors.Is(storageErr, storage.ErrNotFound) { errUnknownBlock.write(w) diff --git a/api/service/api_test.go b/api/service/api_test.go index 6619e6a..fb8b752 100644 --- a/api/service/api_test.go +++ b/api/service/api_test.go @@ -92,10 +92,10 @@ func TestAPIService(t *testing.T) { }, } - err := fs.Write(context.Background(), blockOne) + err := fs.WriteBlob(context.Background(), blockOne) require.NoError(t, err) - err = fs.Write(context.Background(), blockTwo) + err = fs.WriteBlob(context.Background(), blockTwo) require.NoError(t, err) beaconClient.Headers["finalized"] = &v1.BeaconBlockHeader{ diff --git a/archiver/service/archiver.go b/archiver/service/archiver.go index caf7f0f..f08d2d7 100644 --- a/archiver/service/archiver.go +++ b/archiver/service/archiver.go @@ -119,7 +119,7 @@ func (a *Archiver) persistBlobsForBlockToS3(ctx context.Context, blockIdentifier } // The blob that is being written has not been validated. It is assumed that the beacon node is trusted. - err = a.dataStoreClient.Write(ctx, blobData) + err = a.dataStoreClient.WriteBlob(ctx, blobData) if err != nil { a.log.Error("failed to write blob", "err", err) @@ -135,32 +135,66 @@ func (a *Archiver) persistBlobsForBlockToS3(ctx context.Context, blockIdentifier // to the archivers storage or the origin block in the configuration. This is used to ensure that any gaps can be filled. // If an error is encountered persisting a block, it will retry after waiting for a period of time. 
func (a *Archiver) backfillBlobs(ctx context.Context, latest *v1.BeaconBlockHeader) { - current, alreadyExists, err := latest, false, error(nil) - - defer func() { - a.log.Info("backfill complete", "endHash", current.Root.String(), "startHash", latest.Root.String()) - }() + // Add backfill process that starts at latest slot, then loop through all backfill processes + backfillProcesses, err := a.dataStoreClient.ReadBackfillProcesses(ctx) + if err != nil { + a.log.Crit("failed to read backfill_processes", "err", err) + } + backfillProcesses[common.Hash(latest.Root)] = storage.BackfillProcess{Start: *latest, Current: *latest} + a.dataStoreClient.WriteBackfillProcesses(ctx, backfillProcesses) + + backfillLoop := func(start *v1.BeaconBlockHeader, current *v1.BeaconBlockHeader) { + curr, alreadyExists, err := current, false, error(nil) + count := 0 + a.log.Info("backfill process initiated", + "currHash", curr.Root.String(), + "currSlot", curr.Header.Message.Slot, + "startHash", start.Root.String(), + "startSlot", start.Header.Message.Slot, + ) + + defer func() { + a.log.Info("backfill process complete", + "endHash", curr.Root.String(), + "endSlot", curr.Header.Message.Slot, + "startHash", start.Root.String(), + "startSlot", start.Header.Message.Slot, + ) + delete(backfillProcesses, common.Hash(start.Root)) + a.dataStoreClient.WriteBackfillProcesses(ctx, backfillProcesses) + }() + + for !alreadyExists { + previous := curr + + if common.Hash(curr.Root) == a.cfg.OriginBlock { + a.log.Info("reached origin block", "hash", curr.Root.String()) + return + } - for !alreadyExists { - previous := current + curr, alreadyExists, err = a.persistBlobsForBlockToS3(ctx, previous.Header.Message.ParentRoot.String(), false) + if err != nil { + a.log.Error("failed to persist blobs for block, will retry", "err", err, "hash", previous.Header.Message.ParentRoot.String()) + // Revert back to block we failed to fetch + curr = previous + time.Sleep(backfillErrorRetryInterval) + continue + } - if common.Hash(current.Root) == a.cfg.OriginBlock { - a.log.Info("reached origin block", "hash", current.Root.String()) - return - } + if !alreadyExists { + a.metrics.RecordProcessedBlock(metrics.BlockSourceBackfill) + } - current, alreadyExists, err = a.persistBlobsForBlockToS3(ctx, previous.Header.Message.ParentRoot.String(), false) - if err != nil { - a.log.Error("failed to persist blobs for block, will retry", "err", err, "hash", previous.Header.Message.ParentRoot.String()) - // Revert back to block we failed to fetch - current = previous - time.Sleep(backfillErrorRetryInterval) - continue + count++ + if count%10 == 0 { + backfillProcesses[common.Hash(start.Root)] = storage.BackfillProcess{Start: *start, Current: *curr} + a.dataStoreClient.WriteBackfillProcesses(ctx, backfillProcesses) + } } + } - if !alreadyExists { - a.metrics.RecordProcessedBlock(metrics.BlockSourceBackfill) - } + for _, process := range backfillProcesses { + backfillLoop(&process.Start, &process.Current) } } diff --git a/archiver/service/archiver_test.go b/archiver/service/archiver_test.go index 7733649..978ac6e 100644 --- a/archiver/service/archiver_test.go +++ b/archiver/service/archiver_test.go @@ -89,7 +89,7 @@ func TestArchiver_BackfillToOrigin(t *testing.T) { svc, fs := setup(t, beacon) // We have the current head, which is block 5 written to storage - err := fs.Write(context.Background(), storage.BlobData{ + err := fs.WriteBlob(context.Background(), storage.BlobData{ Header: storage.Header{ BeaconBlockHash: blobtest.Five, }, @@ -119,7 +119,7 @@ func 
TestArchiver_BackfillToExistingBlock(t *testing.T) { svc, fs := setup(t, beacon) // We have the current head, which is block 5 written to storage - err := fs.Write(context.Background(), storage.BlobData{ + err := fs.WriteBlob(context.Background(), storage.BlobData{ Header: storage.Header{ BeaconBlockHash: blobtest.Five, }, @@ -130,7 +130,7 @@ func TestArchiver_BackfillToExistingBlock(t *testing.T) { require.NoError(t, err) // We also have block 1 written to storage - err = fs.Write(context.Background(), storage.BlobData{ + err = fs.WriteBlob(context.Background(), storage.BlobData{ Header: storage.Header{ BeaconBlockHash: blobtest.One, }, @@ -156,13 +156,92 @@ func TestArchiver_BackfillToExistingBlock(t *testing.T) { require.NoError(t, err) require.True(t, exists) - data, err := fs.Read(context.Background(), blob) + data, err := fs.ReadBlob(context.Background(), blob) require.NoError(t, err) require.NotNil(t, data) require.Equal(t, data.BlobSidecars.Data, beacon.Blobs[blob.String()]) } } +func TestArchiver_BackfillFinishOldProcess(t *testing.T) { + beacon := beacontest.NewDefaultStubBeaconClient(t) + svc, fs := setup(t, beacon) + + // We have the current head, which is block 5 written to storage + err := fs.WriteBlob(context.Background(), storage.BlobData{ + Header: storage.Header{ + BeaconBlockHash: blobtest.Five, + }, + BlobSidecars: storage.BlobSidecars{ + Data: beacon.Blobs[blobtest.Five.String()], + }, + }) + require.NoError(t, err) + + // We also have block 3 written to storage + err = fs.WriteBlob(context.Background(), storage.BlobData{ + Header: storage.Header{ + BeaconBlockHash: blobtest.Three, + }, + BlobSidecars: storage.BlobSidecars{ + Data: beacon.Blobs[blobtest.Three.String()], + }, + }) + require.NoError(t, err) + + // We also have block 1 written to storage + err = fs.WriteBlob(context.Background(), storage.BlobData{ + Header: storage.Header{ + BeaconBlockHash: blobtest.One, + }, + BlobSidecars: storage.BlobSidecars{ + Data: beacon.Blobs[blobtest.One.String()], + }, + }) + require.NoError(t, err) + + // We expect to backfill blob 4 first, then 2 in a separate process + expectedBlobs := []common.Hash{blobtest.Four, blobtest.Two} + + for _, blob := range expectedBlobs { + exists, err := fs.Exists(context.Background(), blob) + require.NoError(t, err) + require.False(t, exists) + } + + actualProcesses, err := svc.dataStoreClient.ReadBackfillProcesses(context.Background()) + expectedProcesses := make(storage.BackfillProcesses) + require.NoError(t, err) + require.Equal(t, expectedProcesses, actualProcesses) + + expectedProcesses[blobtest.Three] = storage.BackfillProcess{Start: *beacon.Headers[blobtest.Three.String()], Current: *beacon.Headers[blobtest.Three.String()]} + err = svc.dataStoreClient.WriteBackfillProcesses(context.Background(), expectedProcesses) + require.NoError(t, err) + + actualProcesses, err = svc.dataStoreClient.ReadBackfillProcesses(context.Background()) + require.NoError(t, err) + require.Equal(t, expectedProcesses, actualProcesses) + + svc.backfillBlobs(context.Background(), beacon.Headers[blobtest.Five.String()]) + + for _, blob := range expectedBlobs { + exists, err := fs.Exists(context.Background(), blob) + require.NoError(t, err) + require.True(t, exists) + + data, err := fs.ReadBlob(context.Background(), blob) + require.NoError(t, err) + require.NotNil(t, data) + require.Equal(t, data.BlobSidecars.Data, beacon.Blobs[blob.String()]) + } + + actualProcesses, err = svc.dataStoreClient.ReadBackfillProcesses(context.Background()) + require.NoError(t, err) + 
svc.log.Info("backfill processes", "processes", actualProcesses) + require.Equal(t, storage.BackfillProcesses{}, actualProcesses) + +} + func TestArchiver_LatestStopsAtExistingBlock(t *testing.T) { beacon := beacontest.NewDefaultStubBeaconClient(t) svc, fs := setup(t, beacon) diff --git a/common/blobtest/helpers.go b/common/blobtest/helpers.go index e5d65a8..57d9523 100644 --- a/common/blobtest/helpers.go +++ b/common/blobtest/helpers.go @@ -17,6 +17,8 @@ var ( Three = common.Hash{3} Four = common.Hash{4} Five = common.Hash{5} + Six = common.Hash{6} + Seven = common.Hash{7} StartSlot = uint64(10) EndSlot = uint64(15) diff --git a/common/storage/file.go b/common/storage/file.go index f143f64..c9ee7e2 100644 --- a/common/storage/file.go +++ b/common/storage/file.go @@ -16,10 +16,21 @@ type FileStorage struct { } func NewFileStorage(dir string, l log.Logger) *FileStorage { - return &FileStorage{ + storage := &FileStorage{ log: l, directory: dir, } + + _, err := storage.ReadBackfillProcesses(context.Background()) + if err == ErrNotFound { + storage.log.Info("creating empty backfill_processes object") + err = storage.WriteBackfillProcesses(context.Background(), BackfillProcesses{}) + if err != nil { + storage.log.Crit("failed to create backfill_processes file") + } + } + + return storage } func (s *FileStorage) Exists(_ context.Context, hash common.Hash) (bool, error) { @@ -33,7 +44,7 @@ func (s *FileStorage) Exists(_ context.Context, hash common.Hash) (bool, error) return true, nil } -func (s *FileStorage) Read(_ context.Context, hash common.Hash) (BlobData, error) { +func (s *FileStorage) ReadBlob(_ context.Context, hash common.Hash) (BlobData, error) { data, err := os.ReadFile(s.fileName(hash)) if err != nil { if os.IsNotExist(err) { @@ -51,7 +62,47 @@ func (s *FileStorage) Read(_ context.Context, hash common.Hash) (BlobData, error return result, nil } -func (s *FileStorage) Write(_ context.Context, data BlobData) error { +func (s *FileStorage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error) { + BackfillMu.Lock() + defer BackfillMu.Unlock() + + data, err := os.ReadFile(path.Join(s.directory, "backfill_processes")) + if err != nil { + if os.IsNotExist(err) { + return BackfillProcesses{}, ErrNotFound + } + + return BackfillProcesses{}, err + } + var result BackfillProcesses + err = json.Unmarshal(data, &result) + if err != nil { + s.log.Warn("error decoding backfill_processes", "err", err) + return BackfillProcesses{}, ErrMarshaling + } + return result, nil +} + +func (s *FileStorage) WriteBackfillProcesses(_ context.Context, data BackfillProcesses) error { + BackfillMu.Lock() + defer BackfillMu.Unlock() + + b, err := json.Marshal(data) + if err != nil { + s.log.Warn("error encoding backfill_processes", "err", err) + return ErrMarshaling + } + err = os.WriteFile(path.Join(s.directory, "backfill_processes"), b, 0644) + if err != nil { + s.log.Warn("error writing backfill_processes", "err", err) + return err + } + + s.log.Info("wrote backfill_processes") + return nil +} + +func (s *FileStorage) WriteBlob(_ context.Context, data BlobData) error { b, err := json.Marshal(data) if err != nil { s.log.Warn("error encoding blob", "err", err) diff --git a/common/storage/file_test.go b/common/storage/file_test.go index b987099..f8baac4 100644 --- a/common/storage/file_test.go +++ b/common/storage/file_test.go @@ -29,7 +29,7 @@ func runTestExists(t *testing.T, s DataStore) { require.NoError(t, err) require.False(t, exists) - err = s.Write(context.Background(), BlobData{ + err = 
s.WriteBlob(context.Background(), BlobData{ Header: Header{ BeaconBlockHash: id, }, @@ -52,11 +52,11 @@ func TestExists(t *testing.T) { func runTestRead(t *testing.T, s DataStore) { id := common.Hash{1, 2, 3} - _, err := s.Read(context.Background(), id) + _, err := s.ReadBlob(context.Background(), id) require.Error(t, err) require.True(t, errors.Is(err, ErrNotFound)) - err = s.Write(context.Background(), BlobData{ + err = s.WriteBlob(context.Background(), BlobData{ Header: Header{ BeaconBlockHash: id, }, @@ -64,7 +64,7 @@ func runTestRead(t *testing.T, s DataStore) { }) require.NoError(t, err) - data, err := s.Read(context.Background(), id) + data, err := s.ReadBlob(context.Background(), id) require.NoError(t, err) require.Equal(t, id, data.Header.BeaconBlockHash) } @@ -84,14 +84,14 @@ func TestBrokenStorage(t *testing.T) { // Delete the directory to simulate broken storage cleanup() - _, err := fs.Read(context.Background(), id) + _, err := fs.ReadBlob(context.Background(), id) require.Error(t, err) exists, err := fs.Exists(context.Background(), id) require.False(t, exists) require.NoError(t, err) // No error should be returned, as in this test we've just delted the directory - err = fs.Write(context.Background(), BlobData{ + err = fs.WriteBlob(context.Background(), BlobData{ Header: Header{ BeaconBlockHash: id, }, @@ -109,7 +109,7 @@ func TestReadInvalidData(t *testing.T) { err := os.WriteFile(fs.fileName(id), []byte("invalid json"), 0644) require.NoError(t, err) - _, err = fs.Read(context.Background(), id) + _, err = fs.ReadBlob(context.Background(), id) require.Error(t, err) require.True(t, errors.Is(err, ErrMarshaling)) } diff --git a/common/storage/s3.go b/common/storage/s3.go index bf68e92..44578e2 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -40,13 +40,24 @@ func NewS3Storage(cfg flags.S3Config, l log.Logger) (*S3Storage, error) { return nil, err } - return &S3Storage{ + storage := &S3Storage{ s3: client, bucket: cfg.Bucket, path: cfg.Path, log: l, compress: cfg.Compress, - }, nil + } + + _, err = storage.ReadBackfillProcesses(context.Background()) + if err == ErrNotFound { + storage.log.Info("creating empty backfill_processes object") + err = storage.WriteBackfillProcesses(context.Background(), BackfillProcesses{}) + if err != nil { + log.Crit("failed to create backfill_processes key") + } + } + + return storage, nil } func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) { @@ -63,7 +74,7 @@ func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error) return true, nil } -func (s *S3Storage) Read(ctx context.Context, hash common.Hash) (BlobData, error) { +func (s *S3Storage) ReadBlob(ctx context.Context, hash common.Hash) (BlobData, error) { res, err := s.s3.GetObject(ctx, s.bucket, path.Join(s.path, hash.String()), minio.GetObjectOptions{}) if err != nil { s.log.Info("unexpected error fetching blob", "hash", hash.String(), "err", err) @@ -103,7 +114,75 @@ func (s *S3Storage) Read(ctx context.Context, hash common.Hash) (BlobData, error return data, nil } -func (s *S3Storage) Write(ctx context.Context, data BlobData) error { +func (s *S3Storage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error) { + BackfillMu.Lock() + defer BackfillMu.Unlock() + + res, err := s.s3.GetObject(ctx, s.bucket, path.Join(s.path, "backfill_processes"), minio.GetObjectOptions{}) + if err != nil { + s.log.Info("unexpected error fetching backfill_processes", "err", err) + return BackfillProcesses{}, ErrStorage + } + defer 
res.Close() + stat, err := res.Stat() + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + s.log.Info("unable to find backfill_processes key") + return BackfillProcesses{}, ErrNotFound + } else { + s.log.Info("unexpected error fetching backfill_processes", "err", err) + return BackfillProcesses{}, ErrStorage + } + } + + var reader io.ReadCloser = res + defer reader.Close() + + if stat.Metadata.Get("Content-Encoding") == "gzip" { + reader, err = gzip.NewReader(reader) + if err != nil { + s.log.Warn("error creating gzip reader", "err", err) + return BackfillProcesses{}, ErrMarshaling + } + } + + var data BackfillProcesses + err = json.NewDecoder(reader).Decode(&data) + if err != nil { + s.log.Warn("error decoding backfill_processes", "err", err) + return BackfillProcesses{}, ErrMarshaling + } + + return data, nil +} + +func (s *S3Storage) WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error { + BackfillMu.Lock() + defer BackfillMu.Unlock() + + d, err := json.Marshal(data) + if err != nil { + s.log.Warn("error encoding backfill_processes", "err", err) + return ErrMarshaling + } + + options := minio.PutObjectOptions{ + ContentType: "application/json", + } + reader := bytes.NewReader(d) + + _, err = s.s3.PutObject(ctx, s.bucket, path.Join(s.path, "backfill_processes"), reader, int64(len(d)), options) + if err != nil { + s.log.Warn("error writing to backfill_processes", "err", err) + return ErrStorage + } + + s.log.Info("wrote to backfill_processes") + return nil +} + +func (s *S3Storage) WriteBlob(ctx context.Context, data BlobData) error { b, err := json.Marshal(data) if err != nil { s.log.Warn("error encoding blob", "err", err) diff --git a/common/storage/storage.go b/common/storage/storage.go index 57d2468..f1bbab4 100644 --- a/common/storage/storage.go +++ b/common/storage/storage.go @@ -3,7 +3,9 @@ package storage import ( "context" "errors" + "sync" + v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/deneb" "github.com/base-org/blob-archiver/common/flags" "github.com/ethereum/go-ethereum/common" @@ -62,6 +64,18 @@ type BlobData struct { BlobSidecars BlobSidecars `json:"blob_sidecars"` } +var BackfillMu sync.Mutex + +type BackfillProcess struct { + Start v1.BeaconBlockHeader `json:"start_block"` + Current v1.BeaconBlockHeader `json:"current_block"` +} + +// BackfillProcesses maps backfill start block hash --> BackfillProcess. This allows us to track +// multiple processes and reengage a previous backfill in case an archiver restart interrupted +// an active backfill +type BackfillProcesses map[common.Hash]BackfillProcess + // DataStoreReader is the interface for reading from a data store. type DataStoreReader interface { // Exists returns true if the given blob hash exists in the data store, false otherwise. @@ -69,22 +83,24 @@ type DataStoreReader interface { // - nil: the existence check was successful. In this case the boolean should also be set correctly. // - ErrStorage: there was an error accessing the data store. Exists(ctx context.Context, hash common.Hash) (bool, error) - // Read reads the blob data for the given beacon block hash from the data store. + // ReadBlob reads the blob data for the given beacon block hash from the data store. // It should return one of the following: // - nil: reading the blob was successful. The blob data is also returned. // - ErrNotFound: the blob data was not found in the data store. 
// - ErrStorage: there was an error accessing the data store. // - ErrMarshaling: there was an error decoding the blob data. - Read(ctx context.Context, hash common.Hash) (BlobData, error) + ReadBlob(ctx context.Context, hash common.Hash) (BlobData, error) + ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error) } // DataStoreWriter is the interface for writing to a data store. type DataStoreWriter interface { - // Write writes the given blob data to the data store. It should return one of the following errors: + // WriteBlob writes the given blob data to the data store. It should return one of the following errors: // - nil: writing the blob was successful. // - ErrStorage: there was an error accessing the data store. // - ErrMarshaling: there was an error encoding the blob data. - Write(ctx context.Context, data BlobData) error + WriteBlob(ctx context.Context, data BlobData) error + WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error } // DataStore is the interface for a data store that can be both written to and read from. diff --git a/common/storage/storagetest/stub.go b/common/storage/storagetest/stub.go index 0389e60..60900ae 100644 --- a/common/storage/storagetest/stub.go +++ b/common/storage/storagetest/stub.go @@ -26,13 +26,13 @@ func (s *TestFileStorage) WritesFailTimes(times int) { s.writeFailCount = times } -func (s *TestFileStorage) Write(_ context.Context, data storage.BlobData) error { +func (s *TestFileStorage) WriteBlob(_ context.Context, data storage.BlobData) error { if s.writeFailCount > 0 { s.writeFailCount-- return storage.ErrStorage } - return s.FileStorage.Write(context.Background(), data) + return s.FileStorage.WriteBlob(context.Background(), data) } func (fs *TestFileStorage) CheckExistsOrFail(t *testing.T, hash common.Hash) { @@ -48,12 +48,12 @@ func (fs *TestFileStorage) CheckNotExistsOrFail(t *testing.T, hash common.Hash) } func (fs *TestFileStorage) WriteOrFail(t *testing.T, data storage.BlobData) { - err := fs.Write(context.Background(), data) + err := fs.WriteBlob(context.Background(), data) require.NoError(t, err) } func (fs *TestFileStorage) ReadOrFail(t *testing.T, hash common.Hash) storage.BlobData { - data, err := fs.Read(context.Background(), hash) + data, err := fs.ReadBlob(context.Background(), hash) require.NoError(t, err) require.NotNil(t, data) return data From d0e42fbf41d9218595a55e3eb8c044bd4cc9ddd9 Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Tue, 18 Jun 2024 13:55:01 -0400 Subject: [PATCH 06/11] Remove compression option for backfill_processes file --- common/storage/s3.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/common/storage/s3.go b/common/storage/s3.go index 44578e2..126a066 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -124,7 +124,7 @@ func (s *S3Storage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesse return BackfillProcesses{}, ErrStorage } defer res.Close() - stat, err := res.Stat() + _, err = res.Stat() if err != nil { errResponse := minio.ToErrorResponse(err) if errResponse.Code == "NoSuchKey" { @@ -139,14 +139,6 @@ func (s *S3Storage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesse var reader io.ReadCloser = res defer reader.Close() - if stat.Metadata.Get("Content-Encoding") == "gzip" { - reader, err = gzip.NewReader(reader) - if err != nil { - s.log.Warn("error creating gzip reader", "err", err) - return BackfillProcesses{}, ErrMarshaling - } - } - var data BackfillProcesses err = 
json.NewDecoder(reader).Decode(&data) if err != nil { From 2437e3bfff0135ada06b663c7d776173f74261d0 Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Wed, 19 Jun 2024 00:07:37 -0400 Subject: [PATCH 07/11] Use lockfile to prevent multiple archivers from writing to same storage --- archiver/service/archiver.go | 55 +++++++++++++++++++++++++++++++ common/storage/file.go | 47 +++++++++++++++++++++++++-- common/storage/s3.go | 63 ++++++++++++++++++++++++++++++++++++ common/storage/storage.go | 7 ++++ 4 files changed, 170 insertions(+), 2 deletions(-) diff --git a/archiver/service/archiver.go b/archiver/service/archiver.go index f08d2d7..bc20c4d 100644 --- a/archiver/service/archiver.go +++ b/archiver/service/archiver.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/google/uuid" ) const ( @@ -37,6 +38,7 @@ func NewArchiver(l log.Logger, cfg flags.ArchiverConfig, dataStoreClient storage metrics: m, beaconClient: client, stopCh: make(chan struct{}), + id: uuid.New().String(), }, nil } @@ -47,6 +49,7 @@ type Archiver struct { beaconClient BeaconClient metrics metrics.Metricer stopCh chan struct{} + id string } // Start starts archiving blobs. It begins polling the beacon node for the latest blocks and persisting blobs for @@ -63,6 +66,8 @@ func (a *Archiver) Start(ctx context.Context) error { return err } + a.waitObtainStorageLock(ctx) + go a.backfillBlobs(ctx, currentBlock) return a.trackLatestBlocks(ctx) @@ -131,6 +136,56 @@ func (a *Archiver) persistBlobsForBlockToS3(ctx context.Context, blockIdentifier return currentHeader.Data, exists, nil } +const ( + LockUpdateInterval = 10 * time.Second + ObtainLockRetryInterval = 10 * time.Second + LockTimeout = int64(20) // 20 seconds +) + +func (a *Archiver) waitObtainStorageLock(ctx context.Context) { + lockfile, err := a.dataStoreClient.ReadLockfile(ctx) + if err != nil { + a.log.Crit("failed to read lockfile", "err", err) + } + + currentTime := time.Now().Unix() + emptyLockfile := storage.Lockfile{} + if lockfile != emptyLockfile { + for lockfile.ArchiverId != a.id && lockfile.Timestamp+LockTimeout > currentTime { + // Loop until the timestamp read from storage is expired + time.Sleep(ObtainLockRetryInterval) + lockfile, err = a.dataStoreClient.ReadLockfile(ctx) + if err != nil { + a.log.Crit("failed to read lockfile", "err", err) + } + currentTime = time.Now().Unix() + } + } + + err = a.dataStoreClient.WriteLockfile(ctx, storage.Lockfile{ArchiverId: a.id, Timestamp: currentTime}) + if err != nil { + a.log.Crit("failed to write to lockfile: %v", err) + } + + go func() { + // Retain storage lock by continually updating the stored timestamp + ticker := time.NewTicker(LockUpdateInterval) + for { + select { + case <-ticker.C: + currentTime := time.Now().Unix() + err := a.dataStoreClient.WriteLockfile(ctx, storage.Lockfile{ArchiverId: a.id, Timestamp: currentTime}) + if err != nil { + a.log.Error("failed to update lockfile timestamp", "err", err) + } + case <-ctx.Done(): + ticker.Stop() + return + } + } + }() +} + // backfillBlobs will persist all blobs from the provided beacon block header, to either the last block that was persisted // to the archivers storage or the origin block in the configuration. This is used to ensure that any gaps can be filled. // If an error is encountered persisting a block, it will retry after waiting for a period of time. 
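The locking added to archiver.go here is purely data-driven: the holder refreshes the stored timestamp every LockUpdateInterval, and a competitor only proceeds once timestamp+LockTimeout is in the past. The standalone sketch below is not part of the patch — Lockfile is re-declared locally and lockIsHeld is an illustrative helper, not a function in the repository — but it shows the staleness check the wait loop performs and the JSON that ends up in the lockfile object.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Lockfile mirrors the struct this patch adds to common/storage/storage.go.
type Lockfile struct {
	ArchiverId string `json:"archiver_id"`
	Timestamp  int64  `json:"timestamp"`
}

const lockTimeout = int64(20) // seconds, matching LockTimeout in archiver.go

// lockIsHeld reports whether a competing archiver still holds an unexpired lock:
// the lockfile is non-empty, belongs to someone else, and its timestamp is fresh.
func lockIsHeld(lf Lockfile, selfID string, now int64) bool {
	return lf != (Lockfile{}) && lf.ArchiverId != selfID && lf.Timestamp+lockTimeout > now
}

func main() {
	now := time.Now().Unix()
	held := Lockfile{ArchiverId: "other-archiver", Timestamp: now - 5}   // refreshed 5s ago
	stale := Lockfile{ArchiverId: "other-archiver", Timestamp: now - 60} // refreshed 60s ago

	fmt.Println(lockIsHeld(held, "self", now))  // true  -> keep polling ReadLockfile
	fmt.Println(lockIsHeld(stale, "self", now)) // false -> safe to call WriteLockfile

	// The object written to storage is plain JSON.
	b, _ := json.Marshal(Lockfile{ArchiverId: "self", Timestamp: now})
	fmt.Println(string(b)) // {"archiver_id":"self","timestamp":<unix seconds>}
}

With the constants in this patch (10-second refresh, 20-second timeout), a crashed archiver should block its replacement for at most roughly 20 seconds, while a live one keeps the timestamp well inside the window.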
diff --git a/common/storage/file.go b/common/storage/file.go index c9ee7e2..46a6b34 100644 --- a/common/storage/file.go +++ b/common/storage/file.go @@ -23,10 +23,19 @@ func NewFileStorage(dir string, l log.Logger) *FileStorage { _, err := storage.ReadBackfillProcesses(context.Background()) if err == ErrNotFound { - storage.log.Info("creating empty backfill_processes object") + storage.log.Info("creating empty backfill_processes file") err = storage.WriteBackfillProcesses(context.Background(), BackfillProcesses{}) if err != nil { - storage.log.Crit("failed to create backfill_processes file") + storage.log.Crit("failed to create empty backfill_processes file", "err", err) + } + } + + _, err = storage.ReadLockfile(context.Background()) + if err == ErrNotFound { + storage.log.Info("creating empty lockfile file") + err = storage.WriteLockfile(context.Background(), Lockfile{}) + if err != nil { + storage.log.Crit("failed to create empty lockfile file", "err", err) } } @@ -83,6 +92,24 @@ func (s *FileStorage) ReadBackfillProcesses(ctx context.Context) (BackfillProces return result, nil } +func (s *FileStorage) ReadLockfile(ctx context.Context) (Lockfile, error) { + data, err := os.ReadFile(path.Join(s.directory, "lockfile")) + if err != nil { + if os.IsNotExist(err) { + return Lockfile{}, ErrNotFound + } + + return Lockfile{}, err + } + var result Lockfile + err = json.Unmarshal(data, &result) + if err != nil { + s.log.Warn("error decoding lockfile", "err", err) + return Lockfile{}, ErrMarshaling + } + return result, nil +} + func (s *FileStorage) WriteBackfillProcesses(_ context.Context, data BackfillProcesses) error { BackfillMu.Lock() defer BackfillMu.Unlock() @@ -102,6 +129,22 @@ func (s *FileStorage) WriteBackfillProcesses(_ context.Context, data BackfillPro return nil } +func (s *FileStorage) WriteLockfile(_ context.Context, data Lockfile) error { + b, err := json.Marshal(data) + if err != nil { + s.log.Warn("error encoding lockfile", "err", err) + return ErrMarshaling + } + err = os.WriteFile(path.Join(s.directory, "lockfile"), b, 0644) + if err != nil { + s.log.Warn("error writing lockfile", "err", err) + return err + } + + s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", data.Timestamp) + return nil +} + func (s *FileStorage) WriteBlob(_ context.Context, data BlobData) error { b, err := json.Marshal(data) if err != nil { diff --git a/common/storage/s3.go b/common/storage/s3.go index 126a066..c2b390f 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -57,6 +57,15 @@ func NewS3Storage(cfg flags.S3Config, l log.Logger) (*S3Storage, error) { } } + _, err = storage.ReadLockfile(context.Background()) + if err == ErrNotFound { + storage.log.Info("creating empty lockfile object") + err = storage.WriteLockfile(context.Background(), Lockfile{}) + if err != nil { + log.Crit("failed to create backfill_processes key") + } + } + return storage, nil } @@ -149,6 +158,38 @@ func (s *S3Storage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesse return data, nil } +func (s *S3Storage) ReadLockfile(ctx context.Context) (Lockfile, error) { + res, err := s.s3.GetObject(ctx, s.bucket, path.Join(s.path, "lockfile"), minio.GetObjectOptions{}) + if err != nil { + s.log.Info("unexpected error fetching lockfile", "err", err) + return Lockfile{}, ErrStorage + } + defer res.Close() + _, err = res.Stat() + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + s.log.Info("unable to find lockfile key") + return Lockfile{}, 
ErrNotFound + } else { + s.log.Info("unexpected error fetching lockfile", "err", err) + return Lockfile{}, ErrStorage + } + } + + var reader io.ReadCloser = res + defer reader.Close() + + var data Lockfile + err = json.NewDecoder(reader).Decode(&data) + if err != nil { + s.log.Warn("error decoding lockfile", "err", err) + return Lockfile{}, ErrMarshaling + } + + return data, nil +} + func (s *S3Storage) WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error { BackfillMu.Lock() defer BackfillMu.Unlock() @@ -174,6 +215,28 @@ func (s *S3Storage) WriteBackfillProcesses(ctx context.Context, data BackfillPro return nil } +func (s *S3Storage) WriteLockfile(ctx context.Context, data Lockfile) error { + d, err := json.Marshal(data) + if err != nil { + s.log.Warn("error encoding lockfile", "err", err) + return ErrMarshaling + } + + options := minio.PutObjectOptions{ + ContentType: "application/json", + } + reader := bytes.NewReader(d) + + _, err = s.s3.PutObject(ctx, s.bucket, path.Join(s.path, "lockfile"), reader, int64(len(d)), options) + if err != nil { + s.log.Warn("error writing to lockfile", "err", err) + return ErrStorage + } + + s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", data.Timestamp) + return nil +} + func (s *S3Storage) WriteBlob(ctx context.Context, data BlobData) error { b, err := json.Marshal(data) if err != nil { diff --git a/common/storage/storage.go b/common/storage/storage.go index f1bbab4..3ebcee2 100644 --- a/common/storage/storage.go +++ b/common/storage/storage.go @@ -71,6 +71,11 @@ type BackfillProcess struct { Current v1.BeaconBlockHeader `json:"current_block"` } +type Lockfile struct { + ArchiverId string `json:"archiver_id"` + Timestamp int64 `json:"timestamp"` +} + // BackfillProcesses maps backfill start block hash --> BackfillProcess. This allows us to track // multiple processes and reengage a previous backfill in case an archiver restart interrupted // an active backfill @@ -91,6 +96,7 @@ type DataStoreReader interface { // - ErrMarshaling: there was an error decoding the blob data. ReadBlob(ctx context.Context, hash common.Hash) (BlobData, error) ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error) + ReadLockfile(ctx context.Context) (Lockfile, error) } // DataStoreWriter is the interface for writing to a data store. @@ -101,6 +107,7 @@ type DataStoreWriter interface { // - ErrMarshaling: there was an error encoding the blob data. WriteBlob(ctx context.Context, data BlobData) error WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error + WriteLockfile(ctx context.Context, data Lockfile) error } // DataStore is the interface for a data store that can be both written to and read from. 
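Both backends treat the lockfile the same way: a missing object surfaces as ErrNotFound, anything present is JSON-decoded, and writes simply overwrite the single key. As an aid to reading the new DataStoreReader/DataStoreWriter methods, here is a hypothetical in-memory version of just the two lockfile methods; the memStore type, its map layout, and the local ErrNotFound/Lockfile declarations are illustrative stand-ins, not code from the repository.

package storagesketch

import (
	"context"
	"encoding/json"
	"errors"
	"sync"
)

// ErrNotFound stands in for storage.ErrNotFound.
var ErrNotFound = errors.New("not found")

type Lockfile struct {
	ArchiverId string `json:"archiver_id"`
	Timestamp  int64  `json:"timestamp"`
}

// memStore keeps objects in a map, mimicking the single "lockfile" key used by the
// file and S3 backends.
type memStore struct {
	mu      sync.Mutex
	objects map[string][]byte
}

func (m *memStore) ReadLockfile(_ context.Context) (Lockfile, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	raw, ok := m.objects["lockfile"]
	if !ok {
		return Lockfile{}, ErrNotFound
	}
	var lf Lockfile
	if err := json.Unmarshal(raw, &lf); err != nil {
		return Lockfile{}, err
	}
	return lf, nil
}

func (m *memStore) WriteLockfile(_ context.Context, lf Lockfile) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	b, err := json.Marshal(lf)
	if err != nil {
		return err
	}
	if m.objects == nil {
		m.objects = map[string][]byte{}
	}
	m.objects["lockfile"] = b
	return nil
}

Note that, unlike the backfill_processes helpers, none of the lockfile read/write paths in the patch take BackfillMu; the sketch above uses its own mutex only to keep the example race-free.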
From ffd4787844f77fc215caef67c4c98d0e71074f80 Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Wed, 19 Jun 2024 07:26:04 -0400 Subject: [PATCH 08/11] Add logs to give feedback during waitObtainStorageLock --- archiver/service/archiver.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/archiver/service/archiver.go b/archiver/service/archiver.go index bc20c4d..956aa07 100644 --- a/archiver/service/archiver.go +++ b/archiver/service/archiver.go @@ -153,6 +153,7 @@ func (a *Archiver) waitObtainStorageLock(ctx context.Context) { if lockfile != emptyLockfile { for lockfile.ArchiverId != a.id && lockfile.Timestamp+LockTimeout > currentTime { // Loop until the timestamp read from storage is expired + a.log.Info("attempting to obtain storage lock") time.Sleep(ObtainLockRetryInterval) lockfile, err = a.dataStoreClient.ReadLockfile(ctx) if err != nil { @@ -166,6 +167,7 @@ func (a *Archiver) waitObtainStorageLock(ctx context.Context) { if err != nil { a.log.Crit("failed to write to lockfile: %v", err) } + a.log.Info("obtained storage lock") go func() { // Retain storage lock by continually updating the stored timestamp From 1d743babdba3b743f68586b943f66f063ac36107 Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Thu, 20 Jun 2024 10:03:58 -0400 Subject: [PATCH 09/11] Improve format when logging timestamp --- common/storage/file.go | 3 ++- common/storage/s3.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/common/storage/file.go b/common/storage/file.go index 46a6b34..eebcfda 100644 --- a/common/storage/file.go +++ b/common/storage/file.go @@ -5,6 +5,7 @@ import ( "encoding/json" "os" "path" + "strconv" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -141,7 +142,7 @@ func (s *FileStorage) WriteLockfile(_ context.Context, data Lockfile) error { return err } - s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", data.Timestamp) + s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", strconv.FormatInt(data.Timestamp, 10)) return nil } diff --git a/common/storage/s3.go b/common/storage/s3.go index c2b390f..9757e4b 100644 --- a/common/storage/s3.go +++ b/common/storage/s3.go @@ -7,6 +7,7 @@ import ( "encoding/json" "io" "path" + "strconv" "github.com/base-org/blob-archiver/common/flags" "github.com/ethereum/go-ethereum/common" @@ -233,7 +234,7 @@ func (s *S3Storage) WriteLockfile(ctx context.Context, data Lockfile) error { return ErrStorage } - s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", data.Timestamp) + s.log.Info("wrote to lockfile", "archiverId", data.ArchiverId, "timestamp", strconv.FormatInt(data.Timestamp, 10)) return nil } From a0019b18c64b9a07448aa1ffe8213833cabb7ecb Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Thu, 20 Jun 2024 10:04:24 -0400 Subject: [PATCH 10/11] Add test for archiver.waitObtainStorageLock --- archiver/service/archiver.go | 13 +++++++------ archiver/service/archiver_test.go | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/archiver/service/archiver.go b/archiver/service/archiver.go index 956aa07..242d946 100644 --- a/archiver/service/archiver.go +++ b/archiver/service/archiver.go @@ -136,11 +136,9 @@ func (a *Archiver) persistBlobsForBlockToS3(ctx context.Context, blockIdentifier return currentHeader.Data, exists, nil } -const ( - LockUpdateInterval = 10 * time.Second - ObtainLockRetryInterval = 10 * time.Second - LockTimeout = int64(20) // 20 seconds -) +const LockUpdateInterval = 10 * time.Second 
+const LockTimeout = int64(20) // 20 seconds +var ObtainLockRetryInterval = 10 * time.Second func (a *Archiver) waitObtainStorageLock(ctx context.Context) { lockfile, err := a.dataStoreClient.ReadLockfile(ctx) @@ -153,7 +151,10 @@ func (a *Archiver) waitObtainStorageLock(ctx context.Context) { if lockfile != emptyLockfile { for lockfile.ArchiverId != a.id && lockfile.Timestamp+LockTimeout > currentTime { // Loop until the timestamp read from storage is expired - a.log.Info("attempting to obtain storage lock") + a.log.Info("waiting for storage lock timestamp to expire", + "timestamp", strconv.FormatInt(lockfile.Timestamp, 10), + "currentTime", strconv.FormatInt(currentTime, 10), + ) time.Sleep(ObtainLockRetryInterval) lockfile, err = a.dataStoreClient.ReadLockfile(ctx) if err != nil { diff --git a/archiver/service/archiver_test.go b/archiver/service/archiver_test.go index 978ac6e..b6c4dee 100644 --- a/archiver/service/archiver_test.go +++ b/archiver/service/archiver_test.go @@ -163,6 +163,24 @@ func TestArchiver_BackfillToExistingBlock(t *testing.T) { } } +func TestArchiver_ObtainLockfile(t *testing.T) { + beacon := beacontest.NewDefaultStubBeaconClient(t) + svc, _ := setup(t, beacon) + + currentTime := time.Now().Unix() + expiredTime := currentTime - 19 + err := svc.dataStoreClient.WriteLockfile(context.Background(), storage.Lockfile{ArchiverId: "FAKEID", Timestamp: expiredTime}) + require.NoError(t, err) + + ObtainLockRetryInterval = 1 * time.Second + svc.waitObtainStorageLock(context.Background()) + + lockfile, err := svc.dataStoreClient.ReadLockfile(context.Background()) + require.NoError(t, err) + require.Equal(t, svc.id, lockfile.ArchiverId) + require.True(t, lockfile.Timestamp >= currentTime) +} + func TestArchiver_BackfillFinishOldProcess(t *testing.T) { beacon := beacontest.NewDefaultStubBeaconClient(t) svc, fs := setup(t, beacon) From cfba8164dedab4b0165cfea52debf30a74a944cf Mon Sep 17 00:00:00 2001 From: Samuel Stokes Date: Thu, 20 Jun 2024 21:18:15 -0400 Subject: [PATCH 11/11] go mod tidy --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d847843..cd254be 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/ethereum-optimism/optimism v1.7.6 github.com/ethereum/go-ethereum v1.101315.1 github.com/go-chi/chi/v5 v5.0.12 + github.com/google/uuid v1.6.0 github.com/minio/minio-go/v7 v7.0.70 github.com/prometheus/client_golang v1.19.0 github.com/rs/zerolog v1.32.0 @@ -49,7 +50,6 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/go-clone v1.6.0 // indirect
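The final go.mod change records that github.com/google/uuid moved from an indirect to a direct dependency: since the lockfile patch, each archiver instance identifies itself with uuid.New().String(), and that value is what gets written into the lockfile's archiver_id field. A minimal illustration follows (the printed value is random; only the format matters).

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Each archiver process generates a fresh random identity at startup.
	id := uuid.New().String() // e.g. "3f1c9c5e-0b7a-4c59-9d2e-8a1f2b3c4d5e"
	fmt.Println(id)
}

Because the id is generated per process, a restarted archiver sees its own previous lock as foreign and simply waits for the stored timestamp to expire before taking over.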