From 25274b9cbc0721b3635c9e53d889d9e80503f9a3 Mon Sep 17 00:00:00 2001 From: Narangde Date: Sat, 14 Sep 2024 15:30:10 +0900 Subject: [PATCH 01/29] feat(epochs): port epochs module from cosmos-sdk (#101) --- client/app/app.go | 1 + client/app/app_config.go | 9 + client/app/keepers/types.go | 2 + client/x/epochs/README.md | 195 +++++ client/x/epochs/keeper/abci.go | 85 ++ client/x/epochs/keeper/abci_test.go | 190 +++++ client/x/epochs/keeper/epoch.go | 72 ++ client/x/epochs/keeper/epoch_test.go | 105 +++ client/x/epochs/keeper/genesis.go | 31 + client/x/epochs/keeper/genesis_test.go | 99 +++ client/x/epochs/keeper/grpc_query.go | 51 ++ client/x/epochs/keeper/grpc_query_test.go | 22 + client/x/epochs/keeper/hooks.go | 27 + client/x/epochs/keeper/keeper.go | 53 ++ client/x/epochs/keeper/keeper_test.go | 91 +++ client/x/epochs/module/depinject.go | 83 ++ client/x/epochs/module/module.go | 137 ++++ client/x/epochs/module/module.proto | 12 + client/x/epochs/module/module.pulsar.go | 502 ++++++++++++ client/x/epochs/types/codec.go | 10 + client/x/epochs/types/events.pb.go | 493 ++++++++++++ client/x/epochs/types/events.proto | 17 + client/x/epochs/types/genesis.go | 72 ++ client/x/epochs/types/genesis.pb.go | 820 +++++++++++++++++++ client/x/epochs/types/genesis.proto | 60 ++ client/x/epochs/types/hooks.go | 65 ++ client/x/epochs/types/hooks_test.go | 121 +++ client/x/epochs/types/identifier.go | 22 + client/x/epochs/types/keys.go | 16 + client/x/epochs/types/query.pb.go | 908 ++++++++++++++++++++++ client/x/epochs/types/query.proto | 32 + go.mod | 2 +- lib/netconf/local/genesis.json | 13 + 33 files changed, 4417 insertions(+), 1 deletion(-) create mode 100644 client/x/epochs/README.md create mode 100644 client/x/epochs/keeper/abci.go create mode 100644 client/x/epochs/keeper/abci_test.go create mode 100644 client/x/epochs/keeper/epoch.go create mode 100644 client/x/epochs/keeper/epoch_test.go create mode 100644 client/x/epochs/keeper/genesis.go create mode 100644 client/x/epochs/keeper/genesis_test.go create mode 100644 client/x/epochs/keeper/grpc_query.go create mode 100644 client/x/epochs/keeper/grpc_query_test.go create mode 100644 client/x/epochs/keeper/hooks.go create mode 100644 client/x/epochs/keeper/keeper.go create mode 100644 client/x/epochs/keeper/keeper_test.go create mode 100644 client/x/epochs/module/depinject.go create mode 100644 client/x/epochs/module/module.go create mode 100644 client/x/epochs/module/module.proto create mode 100644 client/x/epochs/module/module.pulsar.go create mode 100644 client/x/epochs/types/codec.go create mode 100644 client/x/epochs/types/events.pb.go create mode 100644 client/x/epochs/types/events.proto create mode 100644 client/x/epochs/types/genesis.go create mode 100644 client/x/epochs/types/genesis.pb.go create mode 100644 client/x/epochs/types/genesis.proto create mode 100644 client/x/epochs/types/hooks.go create mode 100644 client/x/epochs/types/hooks_test.go create mode 100644 client/x/epochs/types/identifier.go create mode 100644 client/x/epochs/types/keys.go create mode 100644 client/x/epochs/types/query.pb.go create mode 100644 client/x/epochs/types/query.proto diff --git a/client/app/app.go b/client/app/app.go index a59d2577..094aafd6 100644 --- a/client/app/app.go +++ b/client/app/app.go @@ -92,6 +92,7 @@ func newApp( &app.Keepers.ConsensusParamsKeeper, &app.Keepers.GovKeeper, &app.Keepers.UpgradeKeeper, + &app.Keepers.EpochsKeeper, &app.Keepers.EvmStakingKeeper, &app.Keepers.EVMEngKeeper, ); err != nil { diff --git a/client/app/app_config.go 
b/client/app/app_config.go index f23b65a1..faee4920 100644 --- a/client/app/app_config.go +++ b/client/app/app_config.go @@ -30,6 +30,8 @@ import ( slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + epochsmodule "github.com/piplabs/story/client/x/epochs/module" + epochstypes "github.com/piplabs/story/client/x/epochs/types" evmenginemodule "github.com/piplabs/story/client/x/evmengine/module" evmenginetypes "github.com/piplabs/story/client/x/evmengine/types" evmstakingmodule "github.com/piplabs/story/client/x/evmstaking/module" @@ -84,6 +86,7 @@ var ( genutiltypes.ModuleName, upgradetypes.ModuleName, // Story modules + epochstypes.ModuleName, evmenginetypes.ModuleName, evmstakingtypes.ModuleName, } @@ -98,6 +101,7 @@ var ( // CanWithdrawInvariant invariant. // NOTE: staking module is required if HistoricalEntries param > 0. beginBlockers = []string{ + epochstypes.ModuleName, minttypes.ModuleName, distrtypes.ModuleName, // Note: slashing happens after distr.BeginBlocker slashingtypes.ModuleName, @@ -117,6 +121,7 @@ var ( stakingtypes.BondedPoolName, stakingtypes.NotBondedPoolName, evmstakingtypes.ModuleName, + epochstypes.ModuleName, } moduleAccPerms = []*authmodulev1.ModuleAccountPermission{ @@ -196,6 +201,10 @@ var ( Name: upgradetypes.ModuleName, Config: appconfig.WrapAny(&upgrademodulev1.Module{}), }, + { + Name: epochstypes.ModuleName, + Config: appconfig.WrapAny(&epochsmodule.Module{}), + }, { Name: evmstakingtypes.ModuleName, Config: appconfig.WrapAny(&evmstakingmodule.Module{}), diff --git a/client/app/keepers/types.go b/client/app/keepers/types.go index b404cc6f..7b21abdc 100644 --- a/client/app/keepers/types.go +++ b/client/app/keepers/types.go @@ -15,6 +15,7 @@ import ( slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + epochskeeper "github.com/piplabs/story/client/x/epochs/keeper" evmengkeeper "github.com/piplabs/story/client/x/evmengine/keeper" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" ) @@ -34,4 +35,5 @@ type Keepers struct { // Story EvmStakingKeeper *evmstakingkeeper.Keeper EVMEngKeeper *evmengkeeper.Keeper + EpochsKeeper *epochskeeper.Keeper } diff --git a/client/x/epochs/README.md b/client/x/epochs/README.md new file mode 100644 index 00000000..612a12de --- /dev/null +++ b/client/x/epochs/README.md @@ -0,0 +1,195 @@ +> [!NOTE] +> This `epochs` module is copied from cosmos-sdk +> [v0.52.0-alpha.1](https://github.com/cosmos/cosmos-sdk/tree/v0.52.0-alpha.1/x/epochs) + +--- +sidebar_position: 1 +--- + +# `x/epochs` + +## Abstract + +Often in the SDK, we would like to run certain code every-so often. The +purpose of `epochs` module is to allow other modules to set that they +would like to be signaled once every period. So another module can +specify it wants to execute code once a week, starting at UTC-time = x. +`epochs` creates a generalized epoch interface to other modules so that +they can easily be signaled upon such events. + +## Contents + +1. **[Concept](#concepts)** +2. **[State](#state)** +3. **[Events](#events)** +4. **[Keeper](#keepers)** +5. **[Hooks](#hooks)** +6. **[Queries](#queries)** + +## Concepts + +The epochs module defines on-chain timers that execute at fixed time intervals. +Other SDK modules can then register logic to be executed at the timer ticks. +We refer to the period in between two timer ticks as an "epoch". + +Every timer has a unique identifier. 
+Every epoch will have a start time, and an end time, where `end time = start time + timer interval`. +On mainnet, we only utilize one identifier, with a time interval of `one day`. + +The timer will tick at the first block whose block time is greater than the timer end time, +and set the start as the prior timer end time. (Notably, it's not set to the block time!) +This means that if the chain has been down for a while, you will get one timer tick per block, +until the timer has caught up. + +## State + +The Epochs module keeps a single `EpochInfo` per identifier. +This contains the current state of the timer with the corresponding identifier. +Its fields are modified at every timer tick. +EpochInfos are initialized as part of genesis initialization or upgrade logic, +and are only modified on begin blockers. + +## Events + +The `epochs` module emits the following events: + +### BeginBlocker + +| Type | Attribute Key | Attribute Value | +| ----------- | ------------- | --------------- | +| epoch_start | epoch_number | {epoch_number} | +| epoch_start | start_time | {start_time} | + +### EndBlocker + +| Type | Attribute Key | Attribute Value | +| --------- | ------------- | --------------- | +| epoch_end | epoch_number | {epoch_number} | + +## Keepers + +### Keeper functions + +Epochs keeper module provides utility functions to manage epochs. + +``` go +// Keeper is the interface for epochs module keeper +type Keeper interface { + // GetEpochInfo returns epoch info by identifier + GetEpochInfo(ctx sdk.Context, identifier string) types.EpochInfo + // SetEpochInfo set epoch info + SetEpochInfo(ctx sdk.Context, epoch types.EpochInfo) + // DeleteEpochInfo delete epoch info + DeleteEpochInfo(ctx sdk.Context, identifier string) + // IterateEpochInfo iterate through epochs + IterateEpochInfo(ctx sdk.Context, fn func(index int64, epochInfo types.EpochInfo) (stop bool)) + // Get all epoch infos + AllEpochInfos(ctx sdk.Context) []types.EpochInfo +} +``` + +## Hooks + +```go + // the first block whose timestamp is after the duration is counted as the end of the epoch + AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) + // new epoch is next block of epoch end block + BeforeEpochStart(ctx sdk.Context, epochIdentifier string, epochNumber int64) +``` + +### How modules receive hooks + +On hook receiver function of other modules, they need to filter +`epochIdentifier` and only do executions for only specific +epochIdentifier. Filtering epochIdentifier could be in `Params` of other +modules so that they can be modified by governance. + +This is the standard dev UX of this: + +```golang +func (k MyModuleKeeper) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) { + params := k.GetParams(ctx) + if epochIdentifier == params.DistrEpochIdentifier { + // my logic + } +} +``` + +### Panic isolation + +If a given epoch hook panics, its state update is reverted, but we keep +proceeding through the remaining hooks. This allows more advanced epoch +logic to be used, without concern over state machine halting, or halting +subsequent modules. + +This does mean that if there is behavior you expect from a prior epoch +hook, and that epoch hook reverted, your hook may also have an issue. So +do keep in mind "what if a prior hook didn't get executed" in the safety +checks you consider for a new epoch hook. + +## Queries + +The Epochs module provides the following queries to check the module's state. 
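
As a rough illustration (assuming a node that exposes the standard Cosmos SDK gRPC endpoint on `localhost:9090`), the generated query client in `client/x/epochs/types` could be consumed like the sketch below; the service definition itself follows.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	epochstypes "github.com/piplabs/story/client/x/epochs/types"
)

func main() {
	// Assumed endpoint: a local node with the default gRPC port.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := epochstypes.NewQueryClient(conn)

	// List every running epoch timer and its current state.
	infos, err := client.EpochInfos(context.Background(), &epochstypes.QueryEpochsInfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range infos.Epochs {
		fmt.Printf("%s: epoch %d (duration %v)\n", e.Identifier, e.CurrentEpoch, e.Duration)
	}

	// Ask for the current epoch number of one identifier.
	cur, err := client.CurrentEpoch(context.Background(), &epochstypes.QueryCurrentEpochRequest{Identifier: "day"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current day epoch:", cur.CurrentEpoch)
}
```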
+ +```protobuf +service Query { + // EpochInfos provide running epochInfos + rpc EpochInfos(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) {} + // CurrentEpoch provide current epoch of specified identifier + rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) {} +} +``` + +### Epoch Infos + +Query the currently running epochInfos + +```sh + query epochs epoch-infos +``` + +::: details Example + +An example output: + +```sh +epochs: +- current_epoch: "183" + current_epoch_start_height: "2438409" + current_epoch_start_time: "2021-12-18T17:16:09.898160996Z" + duration: 86400s + epoch_counting_started: true + identifier: day + start_time: "2021-06-18T17:00:00Z" +- current_epoch: "26" + current_epoch_start_height: "2424854" + current_epoch_start_time: "2021-12-17T17:02:07.229632445Z" + duration: 604800s + epoch_counting_started: true + identifier: week + start_time: "2021-06-18T17:00:00Z" +``` + +::: + +### Current Epoch + +Query the current epoch by the specified identifier + +```sh + query epochs current-epoch [identifier] +``` + +::: details Example + +Query the current `day` epoch: + +```sh + query epochs current-epoch day +``` + +Which in this example outputs: + +```sh +current_epoch: "183" +``` diff --git a/client/x/epochs/keeper/abci.go b/client/x/epochs/keeper/abci.go new file mode 100644 index 00000000..02ee7b53 --- /dev/null +++ b/client/x/epochs/keeper/abci.go @@ -0,0 +1,85 @@ +package keeper + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/piplabs/story/client/x/epochs/types" + "github.com/piplabs/story/lib/errors" + "github.com/piplabs/story/lib/log" +) + +// BeginBlocker of epochs module. +func (k Keeper) BeginBlocker(ctx context.Context) error { + defer telemetry.ModuleMeasureSince(types.ModuleName, telemetry.Now(), telemetry.MetricKeyBeginBlocker) + + // NOTE(Narangde): Use UnwrapSDKContext instead of Environment's HeaderService + headerInfo := sdk.UnwrapSDKContext(ctx).HeaderInfo() + err := k.EpochInfo.Walk( + ctx, + nil, + func(_ string, epochInfo types.EpochInfo) (stop bool, err error) { + // If blocktime < initial epoch start time, return + if headerInfo.Time.Before(epochInfo.StartTime) { + return false, nil + } + // if epoch counting hasn't started, signal we need to start. 
+ shouldInitialEpochStart := !epochInfo.EpochCountingStarted + + epochEndTime := epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + shouldEpochStart := (headerInfo.Time.After(epochEndTime)) || shouldInitialEpochStart + + if !shouldEpochStart { + return false, nil + } + epochInfo.CurrentEpochStartHeight = headerInfo.Height + + if shouldInitialEpochStart { + epochInfo.EpochCountingStarted = true + epochInfo.CurrentEpoch = 1 + epochInfo.CurrentEpochStartTime = epochInfo.StartTime + log.Debug(ctx, "Starting new epoch", "epoch_identifier", epochInfo.Identifier, "current_epoch", epochInfo.CurrentEpoch) + } else { + err := k.EventService.EventManager(ctx).Emit(ctx, &types.EventEpochEnd{ + EpochNumber: epochInfo.CurrentEpoch, + }) + if err != nil { + return false, errors.Wrap(err, "emit epoch end event") + } + + if err := k.AfterEpochEnd(ctx, epochInfo.Identifier, epochInfo.CurrentEpoch); err != nil { + // purposely ignoring the error here not to halt the chain if the hook fails + log.Error(ctx, "Error after epoch end", err, "epoch_identifier", epochInfo.Identifier, "current_epoch", epochInfo.CurrentEpoch) + } + + epochInfo.CurrentEpoch++ + epochInfo.CurrentEpochStartTime = epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + log.Debug(ctx, "Starting epoch with", "epoch_identifier", epochInfo.Identifier, "current_epoch", epochInfo.CurrentEpoch) + } + + // emit new epoch start event, set epoch info, and run BeforeEpochStart hook + err = k.EventService.EventManager(ctx).Emit(ctx, &types.EventEpochStart{ + EpochNumber: epochInfo.CurrentEpoch, + EpochStartTime: epochInfo.CurrentEpochStartTime.Unix(), + }) + if err != nil { + return false, errors.Wrap(err, "emit epoch start event") + } + err = k.EpochInfo.Set(ctx, epochInfo.Identifier, epochInfo) + if err != nil { + log.Error(ctx, "Error set epoch info", err, "epoch_identifier", epochInfo.Identifier, "current_epoch", epochInfo.CurrentEpoch) + return false, nil + } + if err := k.BeforeEpochStart(ctx, epochInfo.Identifier, epochInfo.CurrentEpoch); err != nil { + // purposely ignoring the error here not to halt the chain if the hook fails + log.Error(ctx, "Error before epoch start", err, "epoch_identifier", epochInfo.Identifier, "current_epoch", epochInfo.CurrentEpoch) + } + + return false, nil + }, + ) + + return err +} diff --git a/client/x/epochs/keeper/abci_test.go b/client/x/epochs/keeper/abci_test.go new file mode 100644 index 00000000..f1ab8f6d --- /dev/null +++ b/client/x/epochs/keeper/abci_test.go @@ -0,0 +1,190 @@ +package keeper_test + +import ( + "sort" + "testing" + "time" + + "golang.org/x/exp/maps" + + "cosmossdk.io/core/header" + + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/epochs/types" +) + +// This test is responsible for testing how epochs increment based off +// of their initial conditions, and subsequent block height / times. +func (s *KeeperTestSuite) TestEpochInfoBeginBlockChanges() { + block1Time := time.Unix(1656907200, 0).UTC() + const defaultIdentifier = "hourly" + const defaultDuration = time.Hour + // eps is short for epsilon - in this case a negligible amount of time. + const eps = time.Nanosecond + + tests := map[string]struct { + // if identifier, duration is not set, we make it defaultIdentifier and defaultDuration. + // EpochCountingStarted, if unspecified, is inferred by CurrentEpoch == 0 + // StartTime is inferred to be block1Time if left blank. 
+ initialEpochInfo types.EpochInfo + blockHeightTimePairs map[int]time.Time + expEpochInfo types.EpochInfo + }{ + "First block running at exactly start time sets epoch tick": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 1, CurrentEpochStartTime: block1Time, CurrentEpochStartHeight: 1}, + }, + "First block run sets start time, subsequent blocks within timer interval do not cause timer tick": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(time.Second), 3: block1Time.Add(time.Minute), 4: block1Time.Add(30 * time.Minute)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 1, CurrentEpochStartTime: block1Time, CurrentEpochStartHeight: 1}, + }, + "Second block at exactly timer interval later does not tick": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(defaultDuration)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 1, CurrentEpochStartTime: block1Time, CurrentEpochStartHeight: 1}, + }, + "Second block at timer interval + epsilon later does tick": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(defaultDuration).Add(eps)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 2, CurrentEpochStartTime: block1Time.Add(time.Hour), CurrentEpochStartHeight: 2}, + }, + "Downtime recovery (many intervals), first block causes 1 tick and sets current start time 1 interval ahead": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(24 * time.Hour)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 2, CurrentEpochStartTime: block1Time.Add(time.Hour), CurrentEpochStartHeight: 2}, + }, + "Downtime recovery (many intervals), second block is at tick 2, w/ start time 2 intervals ahead": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(24 * time.Hour), 3: block1Time.Add(24 * time.Hour).Add(eps)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 3, CurrentEpochStartTime: block1Time.Add(2 * time.Hour), CurrentEpochStartHeight: 3}, + }, + "Many blocks between first and second tick": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 1, CurrentEpochStartTime: block1Time}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(time.Second), 3: block1Time.Add(2 * time.Second), 4: block1Time.Add(time.Hour).Add(eps)}, + expEpochInfo: types.EpochInfo{StartTime: block1Time, CurrentEpoch: 2, CurrentEpochStartTime: block1Time.Add(time.Hour), CurrentEpochStartHeight: 4}, + }, + "Distinct identifier and duration still works": { + initialEpochInfo: types.EpochInfo{Identifier: "hello", Duration: time.Minute, StartTime: block1Time, CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + blockHeightTimePairs: map[int]time.Time{2: block1Time.Add(time.Second), 3: block1Time.Add(time.Minute).Add(eps)}, + expEpochInfo: types.EpochInfo{Identifier: 
"hello", Duration: time.Minute, StartTime: block1Time, CurrentEpoch: 2, CurrentEpochStartTime: block1Time.Add(time.Minute), CurrentEpochStartHeight: 3}, + }, + "StartTime in future won't get ticked on first block": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time.Add(time.Second), CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + // currentEpochStartHeight is 1 because that's when the timer was created on-chain + expEpochInfo: types.EpochInfo{StartTime: block1Time.Add(time.Second), CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}, CurrentEpochStartHeight: 1}, + }, + "StartTime in past will get ticked on first block": { + initialEpochInfo: types.EpochInfo{StartTime: block1Time.Add(-time.Second), CurrentEpoch: 0, CurrentEpochStartTime: time.Time{}}, + expEpochInfo: types.EpochInfo{StartTime: block1Time.Add(-time.Second), CurrentEpoch: 1, CurrentEpochStartTime: block1Time.Add(-time.Second), CurrentEpochStartHeight: 1}, + }, + } + for name, test := range tests { + s.Run(name, func() { + s.SetupTest() + s.Ctx = s.Ctx.WithHeaderInfo(header.Info{Height: 1, Time: block1Time}) + initialEpoch := initializeBlankEpochInfoFields(test.initialEpochInfo, defaultIdentifier, defaultDuration) + err := s.EpochsKeeper.AddEpochInfo(s.Ctx, initialEpoch) + s.Require().NoError(err) + err = s.EpochsKeeper.BeginBlocker(s.Ctx) + s.Require().NoError(err) + + // get sorted heights + heights := maps.Keys(test.blockHeightTimePairs) + sort.Slice(heights, func(i, j int) bool { return heights[i] < heights[j] }) + + for _, h := range heights { + // for each height in order, run begin block + s.Ctx = s.Ctx.WithHeaderInfo(header.Info{Height: int64(h), Time: test.blockHeightTimePairs[h]}) + err := s.EpochsKeeper.BeginBlocker(s.Ctx) + s.Require().NoError(err) + } + expEpoch := initializeBlankEpochInfoFields(test.expEpochInfo, initialEpoch.Identifier, initialEpoch.Duration) + actEpoch, err := s.EpochsKeeper.EpochInfo.Get(s.Ctx, initialEpoch.Identifier) + s.Require().NoError(err) + s.Require().Equal(expEpoch, actEpoch) + }) + } +} + +// initializeBlankEpochInfoFields set identifier, duration and epochCountingStarted if blank in epoch. 
+func initializeBlankEpochInfoFields(epoch types.EpochInfo, identifier string, duration time.Duration) types.EpochInfo { + if epoch.Identifier == "" { + epoch.Identifier = identifier + } + if epoch.Duration == time.Duration(0) { + epoch.Duration = duration + } + epoch.EpochCountingStarted = (epoch.CurrentEpoch != 0) + + return epoch +} + +func TestEpochStartingOneMonthAfterInitGenesis(t *testing.T) { + t.Parallel() + ctx, epochsKeeper := Setup(t) + // On init genesis, default epochs information is set + // To check init genesis again, should make it fresh status + epochInfos, err := epochsKeeper.AllEpochInfos(ctx) + require.NoError(t, err) + for _, epochInfo := range epochInfos { + err := epochsKeeper.EpochInfo.Remove(ctx, epochInfo.Identifier) + require.NoError(t, err) + } + + now := time.Now() + week := time.Hour * 24 * 7 + month := time.Hour * 24 * 30 + initialBlockHeight := int64(1) + ctx = ctx.WithHeaderInfo(header.Info{Height: initialBlockHeight, Time: now}) + + err = epochsKeeper.InitGenesis(ctx, types.GenesisState{ + Epochs: []types.EpochInfo{ + { + Identifier: "monthly", + StartTime: now.Add(month), + Duration: time.Hour * 24 * 30, + CurrentEpoch: 0, + CurrentEpochStartHeight: ctx.HeaderInfo().Height, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + }, + }, + }) + require.NoError(t, err) + + // epoch not started yet + epochInfo, err := epochsKeeper.EpochInfo.Get(ctx, "monthly") + require.NoError(t, err) + require.Equal(t, int64(0), epochInfo.CurrentEpoch) + require.Equal(t, epochInfo.CurrentEpochStartHeight, initialBlockHeight) + require.Equal(t, time.Time{}, epochInfo.CurrentEpochStartTime) + require.False(t, epochInfo.EpochCountingStarted) + + // after 1 week + ctx = ctx.WithHeaderInfo(header.Info{Height: 2, Time: now.Add(week)}) + err = epochsKeeper.BeginBlocker(ctx) + require.NoError(t, err) + + // epoch not started yet + epochInfo, err = epochsKeeper.EpochInfo.Get(ctx, "monthly") + require.NoError(t, err) + require.Equal(t, int64(0), epochInfo.CurrentEpoch) + require.Equal(t, epochInfo.CurrentEpochStartHeight, initialBlockHeight) + require.Equal(t, time.Time{}, epochInfo.CurrentEpochStartTime) + require.False(t, epochInfo.EpochCountingStarted) + + // after 1 month + ctx = ctx.WithHeaderInfo(header.Info{Height: 3, Time: now.Add(month)}) + err = epochsKeeper.BeginBlocker(ctx) + require.NoError(t, err) + + // epoch started + epochInfo, err = epochsKeeper.EpochInfo.Get(ctx, "monthly") + require.NoError(t, err) + require.Equal(t, int64(1), epochInfo.CurrentEpoch) + require.Equal(t, epochInfo.CurrentEpochStartHeight, ctx.HeaderInfo().Height) + require.Equal(t, epochInfo.CurrentEpochStartTime.UTC().String(), now.Add(month).UTC().String()) + require.True(t, epochInfo.EpochCountingStarted) +} diff --git a/client/x/epochs/keeper/epoch.go b/client/x/epochs/keeper/epoch.go new file mode 100644 index 00000000..188a6399 --- /dev/null +++ b/client/x/epochs/keeper/epoch.go @@ -0,0 +1,72 @@ +package keeper + +import ( + "context" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/piplabs/story/client/x/epochs/types" +) + +// AddEpochInfo adds a new epoch info. Will return an error if the epoch fails validation, +// or re-uses an existing identifier. +// This method also sets the start time if left unset, and sets the epoch start height. 
+func (k Keeper) AddEpochInfo(ctx context.Context, epoch types.EpochInfo) error { + err := epoch.Validate() + if err != nil { + return err + } + // Check if identifier already exists + isExist, err := k.EpochInfo.Has(ctx, epoch.Identifier) + if err != nil { + return err + } + if isExist { + return fmt.Errorf("epoch with identifier %s already exists", epoch.Identifier) + } + + // Initialize empty and default epoch values + if epoch.StartTime.IsZero() { + epoch.StartTime = sdk.UnwrapSDKContext(ctx).HeaderInfo().Time + } + if epoch.CurrentEpochStartHeight == 0 { + epoch.CurrentEpochStartHeight = sdk.UnwrapSDKContext(ctx).HeaderInfo().Height + } + + return k.EpochInfo.Set(ctx, epoch.Identifier, epoch) +} + +// AllEpochInfos iterate through epochs to return all epochs info. +func (k Keeper) AllEpochInfos(ctx context.Context) ([]types.EpochInfo, error) { + epochs := []types.EpochInfo{} + err := k.EpochInfo.Walk( + ctx, + nil, + func(_ string, value types.EpochInfo) (stop bool, err error) { + epochs = append(epochs, value) + return false, nil + }, + ) + + return epochs, err +} + +// NumBlocksSinceEpochStart returns the number of blocks since the epoch started. +// if the epoch started on block N, then calling this during block N (after BeforeEpochStart) +// would return 0. +// Calling it any point in block N+1 (assuming the epoch doesn't increment) would return 1. +func (k Keeper) NumBlocksSinceEpochStart(ctx context.Context, identifier string) (int64, error) { + epoch, err := k.EpochInfo.Get(ctx, identifier) + if err != nil { + return 0, fmt.Errorf("epoch with identifier %s not found", identifier) + } + + return sdk.UnwrapSDKContext(ctx).HeaderInfo().Height - epoch.CurrentEpochStartHeight, nil +} + +// GetEpochInfo gets current epoch info by identifier. +// NOTE(Narangde): add this func which can be a diff from cosmos-sdk. 
+func (k Keeper) GetEpochInfo(ctx context.Context, identifier string) (types.EpochInfo, error) { + return k.EpochInfo.Get(ctx, identifier) +} diff --git a/client/x/epochs/keeper/epoch_test.go b/client/x/epochs/keeper/epoch_test.go new file mode 100644 index 00000000..888e81e4 --- /dev/null +++ b/client/x/epochs/keeper/epoch_test.go @@ -0,0 +1,105 @@ +package keeper_test + +import ( + "time" + + "cosmossdk.io/core/header" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/piplabs/story/client/x/epochs/types" +) + +func (s *KeeperTestSuite) TestAddEpochInfo() { + defaultIdentifier := "default_add_epoch_info_id" + defaultDuration := time.Hour + startBlockHeight := int64(100) + startBlockTime := time.Unix(1656907200, 0).UTC() + tests := map[string]struct { + addedEpochInfo types.EpochInfo + expErr bool + expEpochInfo types.EpochInfo + }{ + "simple_add": { + addedEpochInfo: types.EpochInfo{ + Identifier: defaultIdentifier, + StartTime: time.Time{}, + Duration: defaultDuration, + CurrentEpoch: 0, + CurrentEpochStartHeight: 0, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + }, + expErr: false, + expEpochInfo: types.EpochInfo{ + Identifier: defaultIdentifier, + StartTime: startBlockTime, + Duration: defaultDuration, + CurrentEpoch: 0, + CurrentEpochStartHeight: startBlockHeight, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + }, + }, + "zero_duration": { + addedEpochInfo: types.EpochInfo{ + Identifier: defaultIdentifier, + StartTime: time.Time{}, + Duration: time.Duration(0), + CurrentEpoch: 0, + CurrentEpochStartHeight: 0, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + }, + expErr: true, + }, + } + for name, test := range tests { + s.Run(name, func() { + s.SetupTest() + s.Ctx = s.Ctx.WithHeaderInfo(header.Info{Height: startBlockHeight, Time: startBlockTime}) + err := s.EpochsKeeper.AddEpochInfo(s.Ctx, test.addedEpochInfo) + if !test.expErr { + s.Require().NoError(err) + actualEpochInfo, err := s.EpochsKeeper.EpochInfo.Get(s.Ctx, test.addedEpochInfo.Identifier) + s.Require().NoError(err) + s.Require().Equal(test.expEpochInfo, actualEpochInfo) + } else { + s.Require().Error(err) + } + }) + } +} + +func (s *KeeperTestSuite) TestDuplicateAddEpochInfo() { + identifier := "duplicate_add_epoch_info" + epochInfo := types.NewGenesisEpochInfo(identifier, time.Hour*24*30) + err := s.EpochsKeeper.AddEpochInfo(s.Ctx, epochInfo) + s.Require().NoError(err) + err = s.EpochsKeeper.AddEpochInfo(s.Ctx, epochInfo) + s.Require().Error(err) +} + +func (s *KeeperTestSuite) TestEpochLifeCycle() { + s.SetupTest() + + epochInfo := types.NewGenesisEpochInfo("monthly", time.Hour*24*30) + err := s.EpochsKeeper.AddEpochInfo(s.Ctx, epochInfo) + s.Require().NoError(err) + epochInfoSaved, err := s.EpochsKeeper.EpochInfo.Get(s.Ctx, "monthly") + s.Require().NoError(err) + // setup expected epoch info + expectedEpochInfo := epochInfo + expectedEpochInfo.StartTime = sdk.UnwrapSDKContext(s.Ctx).HeaderInfo().Time + expectedEpochInfo.CurrentEpochStartHeight = s.Ctx.BlockHeight() + s.Require().Equal(expectedEpochInfo, epochInfoSaved) + + allEpochs, err := s.EpochsKeeper.AllEpochInfos(s.Ctx) + s.Require().NoError(err) + s.Require().Len(allEpochs, 5) + s.Require().Equal(allEpochs[0].Identifier, "day") // alphabetical order + s.Require().Equal(allEpochs[1].Identifier, "hour") + s.Require().Equal(allEpochs[2].Identifier, "minute") + s.Require().Equal(allEpochs[3].Identifier, "monthly") + s.Require().Equal(allEpochs[4].Identifier, "week") +} diff --git 
a/client/x/epochs/keeper/genesis.go b/client/x/epochs/keeper/genesis.go new file mode 100644 index 00000000..ba881a66 --- /dev/null +++ b/client/x/epochs/keeper/genesis.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "context" + + "github.com/piplabs/story/client/x/epochs/types" +) + +// InitGenesis sets epoch info from genesis. +func (k Keeper) InitGenesis(ctx context.Context, genState types.GenesisState) error { + for _, epoch := range genState.Epochs { + err := k.AddEpochInfo(ctx, epoch) + if err != nil { + return err + } + } + + return nil +} + +// ExportGenesis returns the capability module's exported genesis. +func (k Keeper) ExportGenesis(ctx context.Context) (*types.GenesisState, error) { + genesis := types.DefaultGenesis() + epochs, err := k.AllEpochInfos(ctx) + if err != nil { + return nil, err + } + genesis.Epochs = epochs + + return genesis, nil +} diff --git a/client/x/epochs/keeper/genesis_test.go b/client/x/epochs/keeper/genesis_test.go new file mode 100644 index 00000000..8ebe388e --- /dev/null +++ b/client/x/epochs/keeper/genesis_test.go @@ -0,0 +1,99 @@ +package keeper_test + +import ( + "testing" + "time" + + "cosmossdk.io/core/header" + + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/epochs/types" +) + +//nolint:paralleltest // no parallel +func TestEpochsExportGenesis(t *testing.T) { + ctx, epochsKeeper := Setup(t) + + chainStartTime := ctx.BlockTime() + chainStartHeight := ctx.HeaderInfo().Height + + genesis, err := epochsKeeper.ExportGenesis(ctx) + require.NoError(t, err) + require.Len(t, genesis.Epochs, 4) + + expectedEpochs := types.DefaultGenesis().Epochs + for i := range expectedEpochs { + expectedEpochs[i].CurrentEpochStartHeight = chainStartHeight + expectedEpochs[i].StartTime = chainStartTime + } + require.Equal(t, expectedEpochs, genesis.Epochs) +} + +func TestEpochsInitGenesis(t *testing.T) { + t.Parallel() + ctx, epochsKeeper := Setup(t) + + // On init genesis, default epochs information is set + // To check init genesis again, should make it fresh status + epochInfos, err := epochsKeeper.AllEpochInfos(ctx) + require.NoError(t, err) + for _, epochInfo := range epochInfos { + err := epochsKeeper.EpochInfo.Remove(ctx, epochInfo.Identifier) + require.NoError(t, err) + } + + // now := time.Now() + ctx.WithHeaderInfo(header.Info{Height: 1, Time: time.Now().UTC()}) + + // test genesisState validation + genesisState := types.GenesisState{ + Epochs: []types.EpochInfo{ + { + Identifier: "monthly", + StartTime: time.Time{}, + Duration: time.Hour * 24, + CurrentEpoch: 0, + CurrentEpochStartHeight: ctx.BlockHeight(), + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: true, + }, + { + Identifier: "monthly", + StartTime: time.Time{}, + Duration: time.Hour * 24, + CurrentEpoch: 0, + CurrentEpochStartHeight: ctx.BlockHeight(), + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: true, + }, + }, + } + require.EqualError(t, genesisState.Validate(), "epoch identifier should be unique") + + genesisState = types.GenesisState{ + Epochs: []types.EpochInfo{ + { + Identifier: "monthly", + StartTime: time.Time{}, + Duration: time.Hour * 24, + CurrentEpoch: 0, + CurrentEpochStartHeight: ctx.BlockHeight(), + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: true, + }, + }, + } + + err = epochsKeeper.InitGenesis(ctx, genesisState) + require.NoError(t, err) + epochInfo, err := epochsKeeper.EpochInfo.Get(ctx, "monthly") + require.NoError(t, err) + require.Equal(t, "monthly", epochInfo.Identifier) + require.Equal(t, 
epochInfo.StartTime.UTC().String(), ctx.HeaderInfo().Time.UTC().String()) + require.Equal(t, time.Hour*24, epochInfo.Duration) + require.Equal(t, int64(0), epochInfo.CurrentEpoch) + require.Equal(t, epochInfo.CurrentEpochStartHeight, ctx.BlockHeight()) + require.Equal(t, epochInfo.CurrentEpochStartTime.UTC().String(), time.Time{}.String()) + require.True(t, epochInfo.EpochCountingStarted) +} diff --git a/client/x/epochs/keeper/grpc_query.go b/client/x/epochs/keeper/grpc_query.go new file mode 100644 index 00000000..cd26d019 --- /dev/null +++ b/client/x/epochs/keeper/grpc_query.go @@ -0,0 +1,51 @@ +package keeper + +import ( + "context" + "errors" + + "github.com/piplabs/story/client/x/epochs/types" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = Querier{} + +// Querier defines a wrapper around the x/epochs keeper providing gRPC method +// handlers. +type Querier struct { + Keeper +} + +// NewQuerier initializes new querier. +func NewQuerier(k Keeper) Querier { + return Querier{Keeper: k} +} + +// EpochInfos provide running epochInfos. +func (q Querier) EpochInfos(ctx context.Context, _ *types.QueryEpochsInfoRequest) (*types.QueryEpochsInfoResponse, error) { + epochs, err := q.Keeper.AllEpochInfos(ctx) + return &types.QueryEpochsInfoResponse{ + Epochs: epochs, + }, err +} + +// CurrentEpoch provides current epoch of specified identifier. +func (q Querier) CurrentEpoch(ctx context.Context, req *types.QueryCurrentEpochRequest) (*types.QueryCurrentEpochResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + if req.Identifier == "" { + return nil, status.Error(codes.InvalidArgument, "identifier is empty") + } + + info, err := q.Keeper.EpochInfo.Get(ctx, req.Identifier) + if err != nil { + return nil, errors.New("not available identifier") + } + + return &types.QueryCurrentEpochResponse{ + CurrentEpoch: info.CurrentEpoch, + }, nil +} diff --git a/client/x/epochs/keeper/grpc_query_test.go b/client/x/epochs/keeper/grpc_query_test.go new file mode 100644 index 00000000..30fc6dba --- /dev/null +++ b/client/x/epochs/keeper/grpc_query_test.go @@ -0,0 +1,22 @@ +package keeper_test + +import ( + "github.com/piplabs/story/client/x/epochs/types" +) + +func (s *KeeperTestSuite) TestQueryEpochInfos() { + s.SetupTest() + queryClient := s.queryClient + + // Check that querying epoch infos on default genesis returns the default genesis epoch infos + epochInfosResponse, err := queryClient.EpochInfos(s.Ctx, &types.QueryEpochsInfoRequest{}) + s.Require().NoError(err) + s.Require().Len(epochInfosResponse.Epochs, 4) + expectedEpochs := types.DefaultGenesis().Epochs + for id := range expectedEpochs { + expectedEpochs[id].StartTime = s.Ctx.BlockTime() + expectedEpochs[id].CurrentEpochStartHeight = s.Ctx.BlockHeight() + } + + s.Require().Equal(expectedEpochs, epochInfosResponse.Epochs) +} diff --git a/client/x/epochs/keeper/hooks.go b/client/x/epochs/keeper/hooks.go new file mode 100644 index 00000000..28d2d939 --- /dev/null +++ b/client/x/epochs/keeper/hooks.go @@ -0,0 +1,27 @@ +package keeper + +import ( + "context" + + "github.com/piplabs/story/client/x/epochs/types" +) + +// Hooks gets the hooks for governance Keeper. 
+func (k Keeper) Hooks() types.EpochHooks { + if k.hooks == nil { + // return a no-op implementation if no hooks are set + return types.MultiEpochHooks{} + } + + return k.hooks +} + +// AfterEpochEnd gets called at the end of the epoch, end of epoch is the timestamp of first block produced after epoch duration. +func (k Keeper) AfterEpochEnd(ctx context.Context, identifier string, epochNumber int64) error { + return k.Hooks().AfterEpochEnd(ctx, identifier, epochNumber) +} + +// BeforeEpochStart new epoch is next block of epoch end block. +func (k Keeper) BeforeEpochStart(ctx context.Context, identifier string, epochNumber int64) error { + return k.Hooks().BeforeEpochStart(ctx, identifier, epochNumber) +} diff --git a/client/x/epochs/keeper/keeper.go b/client/x/epochs/keeper/keeper.go new file mode 100644 index 00000000..183bfae6 --- /dev/null +++ b/client/x/epochs/keeper/keeper.go @@ -0,0 +1,53 @@ +package keeper + +import ( + "cosmossdk.io/collections" + "cosmossdk.io/core/event" + "cosmossdk.io/core/store" + + "github.com/cosmos/cosmos-sdk/codec" + + "github.com/piplabs/story/client/x/epochs/types" +) + +type Keeper struct { + cdc codec.BinaryCodec + hooks types.EpochHooks + + // NOTE(Narangde): Add storeService and EventService manually instead of Environment + storeService store.KVStoreService + EventService event.Service + + Schema collections.Schema + EpochInfo collections.Map[string, types.EpochInfo] +} + +// NewKeeper returns a new keeper by codec and storeKey inputs. +func NewKeeper(storeService store.KVStoreService, eventService event.Service, cdc codec.BinaryCodec) *Keeper { + sb := collections.NewSchemaBuilder(storeService) + k := Keeper{ + cdc: cdc, + storeService: storeService, + EventService: eventService, + EpochInfo: collections.NewMap(sb, types.KeyPrefixEpoch, "epoch_info", collections.StringKey, codec.CollValue[types.EpochInfo](cdc)), + } + + schema, err := sb.Build() + if err != nil { + panic(err) + } + k.Schema = schema + + return &k +} + +// Set hooks. 
+func (k *Keeper) SetHooks(eh types.EpochHooks) *Keeper { + if k.hooks != nil { + panic("cannot set epochs hooks twice") + } + + k.hooks = eh + + return k +} diff --git a/client/x/epochs/keeper/keeper_test.go b/client/x/epochs/keeper/keeper_test.go new file mode 100644 index 00000000..163ad219 --- /dev/null +++ b/client/x/epochs/keeper/keeper_test.go @@ -0,0 +1,91 @@ +package keeper_test + +import ( + "testing" + "time" + + "cosmossdk.io/core/header" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + cosmosmodule "github.com/cosmos/cosmos-sdk/types/module" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + epochskeeper "github.com/piplabs/story/client/x/epochs/keeper" + "github.com/piplabs/story/client/x/epochs/module" + "github.com/piplabs/story/client/x/epochs/types" +) + +type KeeperTestSuite struct { + suite.Suite + Ctx sdk.Context + EpochsKeeper *epochskeeper.Keeper + queryClient types.QueryClient +} + +func (s *KeeperTestSuite) SetupTest() { + s.Ctx, s.EpochsKeeper = Setup(s.T()) + + queryRouter := baseapp.NewGRPCQueryRouter() + cfg := cosmosmodule.NewConfigurator(nil, nil, queryRouter) + types.RegisterQueryServer(cfg.QueryServer(), epochskeeper.NewQuerier(*s.EpochsKeeper)) + grpcQueryService := &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: queryRouter, + Ctx: s.Ctx, + } + encCfg := moduletestutil.MakeTestEncodingConfig(module.AppModuleBasic{}) + grpcQueryService.SetInterfaceRegistry(encCfg.InterfaceRegistry) + s.queryClient = types.NewQueryClient(grpcQueryService) +} + +func Setup(t *testing.T) (sdk.Context, *epochskeeper.Keeper) { + t.Helper() + + key := storetypes.NewKVStoreKey(types.StoreKey) + storeService := runtime.NewKVStoreService(key) + eventService := runtime.EventService{} + testCtx := testutil.DefaultContextWithDB(t, key, storetypes.NewTransientStoreKey("transient_test")) + ctx := testCtx.Ctx.WithHeaderInfo(header.Info{Time: time.Now()}) + encCfg := moduletestutil.MakeTestEncodingConfig(module.AppModuleBasic{}) + + epochsKeeper := epochskeeper.NewKeeper( + storeService, + eventService, + encCfg.Codec, + ) + epochsKeeper = epochsKeeper.SetHooks(types.NewMultiEpochHooks()) + ctx.WithHeaderInfo(header.Info{Height: 1, Time: time.Now().UTC(), ChainID: "epochs"}) + err := epochsKeeper.InitGenesis(ctx, *types.DefaultGenesis()) + require.NoError(t, err) + SetEpochStartTime(ctx, *epochsKeeper) + + return ctx, epochsKeeper +} + +func TestKeeperTestSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(KeeperTestSuite)) +} + +func SetEpochStartTime(ctx sdk.Context, epochsKeeper epochskeeper.Keeper) { + epochs, err := epochsKeeper.AllEpochInfos(ctx) + if err != nil { + panic(err) + } + for _, epoch := range epochs { + epoch.StartTime = ctx.BlockTime() + err := epochsKeeper.EpochInfo.Remove(ctx, epoch.Identifier) + if err != nil { + panic(err) + } + err = epochsKeeper.AddEpochInfo(ctx, epoch) + if err != nil { + panic(err) + } + } +} diff --git a/client/x/epochs/module/depinject.go b/client/x/epochs/module/depinject.go new file mode 100644 index 00000000..9b3fa336 --- /dev/null +++ b/client/x/epochs/module/depinject.go @@ -0,0 +1,83 @@ +package module + +import ( + "fmt" + "sort" + + "golang.org/x/exp/maps" + + "cosmossdk.io/core/appmodule" + "cosmossdk.io/core/event" + "cosmossdk.io/core/store" + 
"cosmossdk.io/depinject" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + + "github.com/piplabs/story/client/x/epochs/keeper" + "github.com/piplabs/story/client/x/epochs/types" +) + +var _ depinject.OnePerModuleType = AppModule{} + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (AppModule) IsOnePerModuleType() {} + +//nolint:gochecknoinits // Cosmos-style +func init() { + appmodule.Register( + &Module{}, + appmodule.Provide(ProvideModule), + appmodule.Invoke(InvokeSetHooks), + ) +} + +type ModuleInputs struct { + depinject.In + + StoreService store.KVStoreService + Cdc codec.Codec + Config *Module + TXConfig client.TxConfig + + EventService event.Service +} + +type ModuleOutputs struct { + depinject.Out + + EpochKeeper *keeper.Keeper + Module appmodule.AppModule +} + +func ProvideModule(in ModuleInputs) ModuleOutputs { + k := keeper.NewKeeper(in.StoreService, in.EventService, in.Cdc) + m := NewAppModule(in.Cdc, k) + + return ModuleOutputs{EpochKeeper: k, Module: m} +} + +func InvokeSetHooks(keeper *keeper.Keeper, hooks map[string]types.EpochHooksWrapper) error { + if hooks == nil { + return nil + } + + // Default ordering is lexical by module name. + // Explicit ordering can be added to the module config if required. + modNames := maps.Keys(hooks) + order := modNames + sort.Strings(order) + + var multiHooks types.MultiEpochHooks + for _, modName := range order { + hook, ok := hooks[modName] + if !ok { + return fmt.Errorf("can't find epoch hooks for module %s", modName) + } + multiHooks = append(multiHooks, hook) + } + + keeper.SetHooks(multiHooks) + + return nil +} diff --git a/client/x/epochs/module/module.go b/client/x/epochs/module/module.go new file mode 100644 index 00000000..eeeef0c8 --- /dev/null +++ b/client/x/epochs/module/module.go @@ -0,0 +1,137 @@ +package module + +import ( + "context" + "encoding/json" + "fmt" + + "cosmossdk.io/core/appmodule" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + + "github.com/piplabs/story/client/x/epochs/keeper" + "github.com/piplabs/story/client/x/epochs/types" + "github.com/piplabs/story/lib/errors" +) + +var ( + _ module.HasGenesis = AppModule{} + + _ appmodule.AppModule = AppModule{} + _ appmodule.HasBeginBlocker = AppModule{} +) + +const ConsensusVersion = 1 + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the +// independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used +// to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore. +func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message. 
+func (AppModuleBasic) RegisterInterfaces(reg codectypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux) {} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface for the epochs module. +type AppModule struct { + AppModuleBasic + + keeper *keeper.Keeper +} + +// NewAppModule creates a new AppModule object. +func NewAppModule(cdc codec.Codec, keeper *keeper.Keeper) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + } +} + +// IsAppModule implements the appmodule.AppModule interface. +func (AppModule) IsAppModule() {} + +// Name returns the epochs module's name. +// Deprecated: kept for legacy reasons. +func (AppModule) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the epochs module's types for the given codec. +func (AppModule) RegisterLegacyAminoCodec(*codec.LegacyAmino) {} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the epochs module. +func (AppModule) RegisterGRPCGatewayRoutes(_ client.Context, _ *runtime.ServeMux) {} + +// DefaultGenesis returns the epochs module's default genesis state. +func (AppModule) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis performs genesis state validation for the epochs module. +func (AppModule) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return errors.Wrap(err, "failed to unmarshal %s genesis state", types.ModuleName) + } + + return gs.Validate() +} + +// InitGenesis performs the epochs module's genesis initialization. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, bz json.RawMessage) { + var gs types.GenesisState + cdc.MustUnmarshalJSON(bz, &gs) + + if err := am.keeper.InitGenesis(ctx, gs); err != nil { + panic(fmt.Sprintf("failed to initialize %s genesis state: %v", types.ModuleName, err)) + } +} + +// ExportGenesis returns the epochs module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs, err := am.keeper.ExportGenesis(ctx) + if err != nil { + panic(err) + } + + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements HasConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return ConsensusVersion } + +// BeginBlock executes all ABCI BeginBlock logic respective to the epochs module. +func (am AppModule) BeginBlock(ctx context.Context) error { + return am.keeper.BeginBlocker(ctx) +} diff --git a/client/x/epochs/module/module.proto b/client/x/epochs/module/module.proto new file mode 100644 index 00000000..df4c1cf8 --- /dev/null +++ b/client/x/epochs/module/module.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package client.x.epochs.module; + +import "cosmos/app/v1alpha1/module.proto"; + +// Module is the config object of the epochs module. 
+message Module { + option (cosmos.app.v1alpha1.module) = { + go_import: "github.com/piplabs/story/client/x/epochs" + }; +} \ No newline at end of file diff --git a/client/x/epochs/module/module.pulsar.go b/client/x/epochs/module/module.pulsar.go new file mode 100644 index 00000000..5ad34eea --- /dev/null +++ b/client/x/epochs/module/module.pulsar.go @@ -0,0 +1,502 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. +package module + +import ( + _ "cosmossdk.io/api/cosmos/app/v1alpha1" + fmt "fmt" + runtime "github.com/cosmos/cosmos-proto/runtime" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_Module protoreflect.MessageDescriptor +) + +func init() { + file_client_x_epochs_module_module_proto_init() + md_Module = File_client_x_epochs_module_module_proto.Messages().ByName("Module") +} + +var _ protoreflect.Message = (*fastReflection_Module)(nil) + +type fastReflection_Module Module + +func (x *Module) ProtoReflect() protoreflect.Message { + return (*fastReflection_Module)(x) +} + +func (x *Module) slowProtoReflect() protoreflect.Message { + mi := &file_client_x_epochs_module_module_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_Module_messageType fastReflection_Module_messageType +var _ protoreflect.MessageType = fastReflection_Module_messageType{} + +type fastReflection_Module_messageType struct{} + +func (x fastReflection_Module_messageType) Zero() protoreflect.Message { + return (*fastReflection_Module)(nil) +} +func (x fastReflection_Module_messageType) New() protoreflect.Message { + return new(fastReflection_Module) +} +func (x fastReflection_Module_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_Module +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_Module) Descriptor() protoreflect.MessageDescriptor { + return md_Module +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_Module) Type() protoreflect.MessageType { + return _fastReflection_Module_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_Module) New() protoreflect.Message { + return new(fastReflection_Module) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_Module) Interface() protoreflect.ProtoMessage { + return (*Module)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_Module) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { +} + +// Has reports whether a field is populated. 
+// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_Module) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_Module) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. 
+// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.epochs.module.Module")) + } + panic(fmt.Errorf("message client.x.epochs.module.Module does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_Module) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in client.x.epochs.module.Module", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_Module) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_Module) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_Module) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Module: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Module: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: client/x/epochs/module/module.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Module is the config object of the epochs module. +type Module struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Module) Reset() { + *x = Module{} + if protoimpl.UnsafeEnabled { + mi := &file_client_x_epochs_module_module_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Module) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Module) ProtoMessage() {} + +// Deprecated: Use Module.ProtoReflect.Descriptor instead. +func (*Module) Descriptor() ([]byte, []int) { + return file_client_x_epochs_module_module_proto_rawDescGZIP(), []int{0} +} + +var File_client_x_epochs_module_module_proto protoreflect.FileDescriptor + +var file_client_x_epochs_module_module_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x73, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x78, 0x2e, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x1a, 0x20, 0x63, + 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x3a, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x30, 0xba, 0xc0, 0x96, 0xda, 0x01, + 0x2a, 0x0a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, + 0x70, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x42, 0xce, 0x01, 0x0a, 0x1a, + 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x78, 0x2e, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x73, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x2f, 0x6d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0xa2, 0x02, 0x04, 0x43, 0x58, 0x45, 0x4d, 0xaa, 0x02, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x58, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0xca, 0x02, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x5c, 0x58, 0x5c, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x73, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0xe2, 0x02, 0x22, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5c, 0x58, 0x5c, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x5c, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x58, 0x3a, 0x3a, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x73, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_x_epochs_module_module_proto_rawDescOnce sync.Once + file_client_x_epochs_module_module_proto_rawDescData = file_client_x_epochs_module_module_proto_rawDesc +) + +func file_client_x_epochs_module_module_proto_rawDescGZIP() []byte { + file_client_x_epochs_module_module_proto_rawDescOnce.Do(func() { + file_client_x_epochs_module_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_x_epochs_module_module_proto_rawDescData) + }) + return file_client_x_epochs_module_module_proto_rawDescData +} + +var file_client_x_epochs_module_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_client_x_epochs_module_module_proto_goTypes = []interface{}{ + (*Module)(nil), // 0: client.x.epochs.module.Module +} +var file_client_x_epochs_module_module_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_client_x_epochs_module_module_proto_init() } +func file_client_x_epochs_module_module_proto_init() { + if File_client_x_epochs_module_module_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_x_epochs_module_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Module); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_x_epochs_module_module_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_x_epochs_module_module_proto_goTypes, + DependencyIndexes: file_client_x_epochs_module_module_proto_depIdxs, + MessageInfos: file_client_x_epochs_module_module_proto_msgTypes, + }.Build() + File_client_x_epochs_module_module_proto = out.File + file_client_x_epochs_module_module_proto_rawDesc = nil + file_client_x_epochs_module_module_proto_goTypes = nil + file_client_x_epochs_module_module_proto_depIdxs = nil +} diff --git a/client/x/epochs/types/codec.go b/client/x/epochs/types/codec.go new file mode 100644 index 00000000..b03261a0 --- /dev/null +++ b/client/x/epochs/types/codec.go @@ -0,0 +1,10 @@ +package types + +import ( + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterInterfaces(registrar cdctypes.InterfaceRegistry) { + registrar.RegisterImplementations((*sdk.Msg)(nil)) +} diff --git a/client/x/epochs/types/events.pb.go b/client/x/epochs/types/events.pb.go new file mode 100644 index 00000000..e149b3c2 --- /dev/null +++ b/client/x/epochs/types/events.pb.go @@ -0,0 +1,493 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: client/x/epochs/types/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventEpochEnd is an event emitted when an epoch end. +type EventEpochEnd struct { + EpochNumber int64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` +} + +func (m *EventEpochEnd) Reset() { *m = EventEpochEnd{} } +func (m *EventEpochEnd) String() string { return proto.CompactTextString(m) } +func (*EventEpochEnd) ProtoMessage() {} +func (*EventEpochEnd) Descriptor() ([]byte, []int) { + return fileDescriptor_ee2d70eeda0e3075, []int{0} +} +func (m *EventEpochEnd) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventEpochEnd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventEpochEnd.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventEpochEnd) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventEpochEnd.Merge(m, src) +} +func (m *EventEpochEnd) XXX_Size() int { + return m.Size() +} +func (m *EventEpochEnd) XXX_DiscardUnknown() { + xxx_messageInfo_EventEpochEnd.DiscardUnknown(m) +} + +var xxx_messageInfo_EventEpochEnd proto.InternalMessageInfo + +func (m *EventEpochEnd) GetEpochNumber() int64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +// EventEpochStart is an event emitted when an epoch start. 
+type EventEpochStart struct { + EpochNumber int64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` + EpochStartTime int64 `protobuf:"varint,2,opt,name=epoch_start_time,json=epochStartTime,proto3" json:"epoch_start_time,omitempty"` +} + +func (m *EventEpochStart) Reset() { *m = EventEpochStart{} } +func (m *EventEpochStart) String() string { return proto.CompactTextString(m) } +func (*EventEpochStart) ProtoMessage() {} +func (*EventEpochStart) Descriptor() ([]byte, []int) { + return fileDescriptor_ee2d70eeda0e3075, []int{1} +} +func (m *EventEpochStart) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventEpochStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventEpochStart.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventEpochStart) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventEpochStart.Merge(m, src) +} +func (m *EventEpochStart) XXX_Size() int { + return m.Size() +} +func (m *EventEpochStart) XXX_DiscardUnknown() { + xxx_messageInfo_EventEpochStart.DiscardUnknown(m) +} + +var xxx_messageInfo_EventEpochStart proto.InternalMessageInfo + +func (m *EventEpochStart) GetEpochNumber() int64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +func (m *EventEpochStart) GetEpochStartTime() int64 { + if m != nil { + return m.EpochStartTime + } + return 0 +} + +func init() { + proto.RegisterType((*EventEpochEnd)(nil), "client.x.epochs.types.EventEpochEnd") + proto.RegisterType((*EventEpochStart)(nil), "client.x.epochs.types.EventEpochStart") +} + +func init() { + proto.RegisterFile("client/x/epochs/types/events.proto", fileDescriptor_ee2d70eeda0e3075) +} + +var fileDescriptor_ee2d70eeda0e3075 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xce, 0xc9, 0x4c, + 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0x4f, 0x2d, 0xc8, 0x4f, 0xce, 0x28, 0xd6, 0x2f, 0xa9, 0x2c, 0x48, + 0x2d, 0xd6, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x85, 0xa8, 0xd1, 0xab, 0xd0, 0x83, 0xa8, 0xd1, 0x03, 0xab, 0x51, 0x32, 0xe2, 0xe2, 0x75, 0x05, + 0x29, 0x73, 0x05, 0x09, 0xba, 0xe6, 0xa5, 0x08, 0x29, 0x72, 0xf1, 0x80, 0x15, 0xc4, 0xe7, 0x95, + 0xe6, 0x26, 0xa5, 0x16, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x71, 0x83, 0xc5, 0xfc, 0xc0, + 0x42, 0x4a, 0x71, 0x5c, 0xfc, 0x08, 0x3d, 0xc1, 0x25, 0x89, 0x45, 0x25, 0x44, 0xe8, 0x12, 0xd2, + 0xe0, 0x12, 0x80, 0x28, 0x29, 0x06, 0xe9, 0x88, 0x2f, 0xc9, 0xcc, 0x4d, 0x95, 0x60, 0x02, 0x2b, + 0xe3, 0x4b, 0x85, 0x1b, 0x14, 0x92, 0x99, 0x9b, 0xea, 0xa4, 0x7f, 0xe2, 0x91, 0x1c, 0xe3, 0x85, + 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, + 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xa2, 0x58, 0x3d, 0x9a, 0xc4, 0x06, 0xf6, 0xa2, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xa9, 0x19, 0xc8, 0xb7, 0x08, 0x01, 0x00, 0x00, +} + +func (m *EventEpochEnd) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventEpochEnd) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventEpochEnd) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = 
i + var l int + _ = l + if m.EpochNumber != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventEpochStart) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventEpochStart) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventEpochStart) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EpochStartTime != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.EpochStartTime)) + i-- + dAtA[i] = 0x10 + } + if m.EpochNumber != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventEpochEnd) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovEvents(uint64(m.EpochNumber)) + } + return n +} + +func (m *EventEpochStart) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovEvents(uint64(m.EpochNumber)) + } + if m.EpochStartTime != 0 { + n += 1 + sovEvents(uint64(m.EpochStartTime)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventEpochEnd) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventEpochEnd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventEpochEnd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventEpochStart) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventEpochStart: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventEpochStart: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochStartTime", wireType) + } + m.EpochStartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochStartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/epochs/types/events.proto b/client/x/epochs/types/events.proto new file mode 100644 index 00000000..0df97db1 --- /dev/null +++ b/client/x/epochs/types/events.proto @@ -0,0 +1,17 @@ +// Since: x/epochs 0.1.0 +syntax = "proto3"; + +package client.x.epochs.types; + +option go_package = "client/x/epochs/types"; + +// EventEpochEnd is an event emitted when an epoch end. +message EventEpochEnd { + int64 epoch_number = 1; +} + +// EventEpochStart is an event emitted when an epoch start. 
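+// It reports the number of the epoch that is starting and the start time recorded
+// for that epoch (a descriptive comment added for clarity; field semantics are
+// taken from the field names below).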
+message EventEpochStart { + int64 epoch_number = 1; + int64 epoch_start_time = 2; +} diff --git a/client/x/epochs/types/genesis.go b/client/x/epochs/types/genesis.go new file mode 100644 index 00000000..00110b1c --- /dev/null +++ b/client/x/epochs/types/genesis.go @@ -0,0 +1,72 @@ +package types + +import ( + "errors" + "time" +) + +// DefaultIndex is the default capability global index. +const DefaultIndex uint64 = 1 + +func NewGenesisState(epochs []EpochInfo) *GenesisState { + return &GenesisState{Epochs: epochs} +} + +// DefaultGenesis returns the default Capability genesis state. +func DefaultGenesis() *GenesisState { + epochs := []EpochInfo{ + NewGenesisEpochInfo("day", time.Hour*24), // alphabetical order + NewGenesisEpochInfo("hour", time.Hour), + NewGenesisEpochInfo("minute", time.Minute), + NewGenesisEpochInfo("week", time.Hour*24*7), + } + + return NewGenesisState(epochs) +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + epochIdentifiers := map[string]bool{} + for _, epoch := range gs.Epochs { + if err := epoch.Validate(); err != nil { + return err + } + if epochIdentifiers[epoch.Identifier] { + return errors.New("epoch identifier should be unique") + } + epochIdentifiers[epoch.Identifier] = true + } + + return nil +} + +// Validate also validates epoch info. +func (epoch EpochInfo) Validate() error { + if epoch.Identifier == "" { + return errors.New("epoch identifier should NOT be empty") + } + if epoch.Duration == 0 { + return errors.New("epoch duration should NOT be 0") + } + if epoch.CurrentEpoch < 0 { + return errors.New("epoch CurrentEpoch must be non-negative") + } + if epoch.CurrentEpochStartHeight < 0 { + return errors.New("epoch CurrentEpochStartHeight must be non-negative") + } + + return nil +} + +func NewGenesisEpochInfo(identifier string, duration time.Duration) EpochInfo { + return EpochInfo{ + Identifier: identifier, + StartTime: time.Time{}, + Duration: duration, + CurrentEpoch: 0, + CurrentEpochStartHeight: 0, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + } +} diff --git a/client/x/epochs/types/genesis.pb.go b/client/x/epochs/types/genesis.pb.go new file mode 100644 index 00000000..158e4abb --- /dev/null +++ b/client/x/epochs/types/genesis.pb.go @@ -0,0 +1,820 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: client/x/epochs/types/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EpochInfo is a struct that describes the data going into +// a timer defined by the x/epochs module. +type EpochInfo struct { + // identifier is a unique reference to this particular timer. 
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` + // duration is the time in between epoch ticks. + // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + Duration time.Duration `protobuf:"bytes,3,opt,name=duration,proto3,stdduration" json:"duration,omitempty"` + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + CurrentEpoch int64 `protobuf:"varint,4,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` + // current_epoch_start_time describes the start time of the current timer + // interval. The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) + // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + CurrentEpochStartTime time.Time `protobuf:"bytes,5,opt,name=current_epoch_start_time,json=currentEpochStartTime,proto3,stdtime" json:"current_epoch_start_time"` + // epoch_counting_started is a boolean, that indicates whether this + // epoch timer has began yet. + EpochCountingStarted bool `protobuf:"varint,6,opt,name=epoch_counting_started,json=epochCountingStarted,proto3" json:"epoch_counting_started,omitempty"` + // current_epoch_start_height is the block height at which the current epoch + // started. 
(The block height at which the timer last ticked) + CurrentEpochStartHeight int64 `protobuf:"varint,8,opt,name=current_epoch_start_height,json=currentEpochStartHeight,proto3" json:"current_epoch_start_height,omitempty"` +} + +func (m *EpochInfo) Reset() { *m = EpochInfo{} } +func (m *EpochInfo) String() string { return proto.CompactTextString(m) } +func (*EpochInfo) ProtoMessage() {} +func (*EpochInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_98958ed62d3b01f7, []int{0} +} +func (m *EpochInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EpochInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EpochInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochInfo.Merge(m, src) +} +func (m *EpochInfo) XXX_Size() int { + return m.Size() +} +func (m *EpochInfo) XXX_DiscardUnknown() { + xxx_messageInfo_EpochInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochInfo proto.InternalMessageInfo + +func (m *EpochInfo) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *EpochInfo) GetStartTime() time.Time { + if m != nil { + return m.StartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpoch() int64 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpochStartTime() time.Time { + if m != nil { + return m.CurrentEpochStartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetEpochCountingStarted() bool { + if m != nil { + return m.EpochCountingStarted + } + return false +} + +func (m *EpochInfo) GetCurrentEpochStartHeight() int64 { + if m != nil { + return m.CurrentEpochStartHeight + } + return 0 +} + +// GenesisState defines the epochs module's genesis state. 
+type GenesisState struct { + Epochs []EpochInfo `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_98958ed62d3b01f7, []int{1} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetEpochs() []EpochInfo { + if m != nil { + return m.Epochs + } + return nil +} + +func init() { + proto.RegisterType((*EpochInfo)(nil), "client.x.epochs.types.EpochInfo") + proto.RegisterType((*GenesisState)(nil), "client.x.epochs.types.GenesisState") +} + +func init() { + proto.RegisterFile("client/x/epochs/types/genesis.proto", fileDescriptor_98958ed62d3b01f7) +} + +var fileDescriptor_98958ed62d3b01f7 = []byte{ + // 425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0x6b, 0x5a, 0x4a, 0xea, 0x3b, 0x24, 0x64, 0x5d, 0xc1, 0x74, 0x70, 0xa2, 0xbb, 0x25, + 0x03, 0xb2, 0xa5, 0x83, 0x0d, 0x89, 0x21, 0x07, 0xe2, 0xcf, 0xc0, 0x90, 0xc2, 0x82, 0x84, 0xaa, + 0x5c, 0xea, 0xa6, 0x96, 0x2e, 0x76, 0x94, 0xbc, 0x91, 0xee, 0xbe, 0xc5, 0x8d, 0x7c, 0x20, 0x86, + 0x1b, 0x6f, 0x64, 0x3a, 0x50, 0xbb, 0xf1, 0x29, 0x50, 0xec, 0xa4, 0x2a, 0xb4, 0x0b, 0x5b, 0xeb, + 0xe7, 0x79, 0x7f, 0x8f, 0x1f, 0xe7, 0xc5, 0x27, 0xe9, 0x85, 0x92, 0x1a, 0xc4, 0xa5, 0x90, 0x85, + 0x49, 0x97, 0x95, 0x80, 0xab, 0x42, 0x56, 0x22, 0x93, 0x5a, 0x56, 0xaa, 0xe2, 0x45, 0x69, 0xc0, + 0x90, 0xb1, 0x33, 0xf1, 0x4b, 0xee, 0x4c, 0xdc, 0x9a, 0x26, 0x47, 0x99, 0xc9, 0x8c, 0x75, 0x88, + 0xe6, 0x97, 0x33, 0x4f, 0x58, 0x66, 0x4c, 0x76, 0x21, 0x85, 0xfd, 0x77, 0x5e, 0x2f, 0xc4, 0xbc, + 0x2e, 0x13, 0x50, 0x46, 0xb7, 0xba, 0xff, 0xaf, 0x0e, 0x2a, 0x97, 0x15, 0x24, 0x79, 0xe1, 0x0c, + 0xc7, 0xdf, 0xfb, 0x78, 0xf4, 0xa6, 0xc9, 0x79, 0xaf, 0x17, 0x86, 0x30, 0x8c, 0xd5, 0x5c, 0x6a, + 0x50, 0x0b, 0x25, 0x4b, 0x8a, 0x02, 0x14, 0x8e, 0xe2, 0xad, 0x13, 0x72, 0x86, 0x71, 0x05, 0x49, + 0x09, 0xb3, 0x06, 0x43, 0xef, 0x05, 0x28, 0x3c, 0x38, 0x9d, 0x70, 0x97, 0xc1, 0xbb, 0x0c, 0xfe, + 0xa9, 0xcb, 0x88, 0xbc, 0x9b, 0x3b, 0xbf, 0x77, 0xfd, 0xd3, 0x47, 0xf1, 0xc8, 0xce, 0x35, 0x0a, + 0xf9, 0x8c, 0xbd, 0xee, 0x96, 0xb4, 0x6f, 0x11, 0x4f, 0x77, 0x10, 0xaf, 0x5b, 0x43, 0xc4, 0x1a, + 0xc2, 0xef, 0x3b, 0x9f, 0x74, 0x23, 0xcf, 0x4c, 0xae, 0x40, 0xe6, 0x05, 0x5c, 0x7d, 0x6b, 0xb8, + 0x1b, 0x14, 0x39, 0xc1, 0x0f, 0xd3, 0xba, 0x2c, 0xa5, 0x86, 0x99, 0x7d, 0x38, 0x3a, 0x08, 0x50, + 0xd8, 0x8f, 0x0f, 0xdb, 0x43, 0x5b, 0x92, 0x7c, 0xc5, 0xf4, 0x2f, 0xd3, 0x6c, 0xab, 0xce, 0xfd, + 0xff, 0xa8, 0x33, 0xde, 0xa6, 0x4e, 0x37, 0xd5, 0x5e, 0xe0, 0xc7, 0x0e, 0x9b, 0x9a, 0x5a, 0x83, + 0xd2, 0x99, 0xe3, 0xcb, 0x39, 0x1d, 0x06, 0x28, 0xf4, 0xe2, 0x23, 0xab, 0x9e, 0xb5, 0xe2, 
0xd4, + 0x69, 0xe4, 0x25, 0x9e, 0xec, 0xbb, 0xd4, 0x52, 0xaa, 0x6c, 0x09, 0xd4, 0xb3, 0x35, 0x9e, 0xec, + 0x04, 0xbe, 0xb3, 0xf2, 0x87, 0x81, 0xf7, 0xe0, 0x91, 0x77, 0xfc, 0x11, 0x1f, 0xbe, 0x75, 0x5b, + 0x34, 0x85, 0x04, 0x24, 0x79, 0x85, 0x87, 0x6e, 0x7b, 0x28, 0x0a, 0xfa, 0xe1, 0xc1, 0x69, 0xc0, + 0xf7, 0x6e, 0x15, 0xdf, 0x7c, 0xfa, 0x68, 0xd0, 0x74, 0x8b, 0xdb, 0xa9, 0x48, 0xdc, 0xac, 0x18, + 0xba, 0x5d, 0x31, 0xf4, 0x6b, 0xc5, 0xd0, 0xf5, 0x9a, 0xf5, 0x6e, 0xd7, 0xac, 0xf7, 0x63, 0xcd, + 0x7a, 0x5f, 0xc6, 0x7b, 0x77, 0xf8, 0x7c, 0x68, 0x9f, 0xeb, 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xb1, 0xd4, 0xe2, 0x83, 0xe3, 0x02, 0x00, 0x00, +} + +func (m *EpochInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentEpochStartHeight != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.CurrentEpochStartHeight)) + i-- + dAtA[i] = 0x40 + } + if m.EpochCountingStarted { + i-- + if m.EpochCountingStarted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CurrentEpochStartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintGenesis(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x2a + if m.CurrentEpoch != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x20 + } + n2, err2 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintGenesis(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + n3, err3 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintGenesis(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x12 + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + 
return base +} +func (m *EpochInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovGenesis(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovGenesis(uint64(l)) + if m.CurrentEpoch != 0 { + n += 1 + sovGenesis(uint64(m.CurrentEpoch)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime) + n += 1 + l + sovGenesis(uint64(l)) + if m.EpochCountingStarted { + n += 2 + } + if m.CurrentEpochStartHeight != 0 { + n += 1 + sovGenesis(uint64(m.CurrentEpochStartHeight)) + } + return n +} + +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EpochInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CurrentEpochStartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochCountingStarted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EpochCountingStarted = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartHeight", wireType) + } + m.CurrentEpochStartHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpochStartHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, EpochInfo{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/epochs/types/genesis.proto b/client/x/epochs/types/genesis.proto new file mode 100644 index 00000000..9420542d --- /dev/null +++ b/client/x/epochs/types/genesis.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package client.x.epochs.types; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "client/x/epochs/types"; + +// EpochInfo is a struct that describes the data going into +// a timer defined by the x/epochs module. +message EpochInfo { + // identifier is a unique reference to this particular timer. + string identifier = 1; + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + google.protobuf.Timestamp start_time = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + // duration is the time in between epoch ticks. 
+ // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + google.protobuf.Duration duration = 3 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.jsontag) = "duration,omitempty"]; + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + int64 current_epoch = 4; + // current_epoch_start_time describes the start time of the current timer + // interval. The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) + // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + google.protobuf.Timestamp current_epoch_start_time = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + // epoch_counting_started is a boolean, that indicates whether this + // epoch timer has began yet. + bool epoch_counting_started = 6; + reserved 7; + // current_epoch_start_height is the block height at which the current epoch + // started. (The block height at which the timer last ticked) + int64 current_epoch_start_height = 8; +} + +// GenesisState defines the epochs module's genesis state. +message GenesisState { + repeated EpochInfo epochs = 1 [(gogoproto.nullable) = false]; +} diff --git a/client/x/epochs/types/hooks.go b/client/x/epochs/types/hooks.go new file mode 100644 index 00000000..a18f4621 --- /dev/null +++ b/client/x/epochs/types/hooks.go @@ -0,0 +1,65 @@ +package types + +import ( + "context" + "errors" + + storyerr "github.com/piplabs/story/lib/errors" +) + +type EpochHooks interface { + // the first block whose timestamp is after the duration is counted as the end of the epoch + AfterEpochEnd(ctx context.Context, epochIdentifier string, epochNumber int64) error + // new epoch is next block of epoch end block + BeforeEpochStart(ctx context.Context, epochIdentifier string, epochNumber int64) error + // Returns the name of the module implementing epoch hook. + GetModuleName() string +} + +var _ EpochHooks = MultiEpochHooks{} + +// combine multiple hooks, all hook functions are run in array sequence. +type MultiEpochHooks []EpochHooks + +// GetModuleName implements EpochHooks. +func (MultiEpochHooks) GetModuleName() string { + return ModuleName +} + +func NewMultiEpochHooks(hooks ...EpochHooks) MultiEpochHooks { + return hooks +} + +// AfterEpochEnd is called when epoch is going to be ended, epochNumber is the number of epoch that is ending. 
+func (h MultiEpochHooks) AfterEpochEnd(ctx context.Context, epochIdentifier string, epochNumber int64) error { + var errs error + for i := range h { + errs = errors.Join(errs, h[i].AfterEpochEnd(ctx, epochIdentifier, epochNumber)) + } + + if errs != nil { + return storyerr.Wrap(errs, "after epoch end") + } + + return nil +} + +// BeforeEpochStart is called when epoch is going to be started, epochNumber is the number of epoch that is starting. +func (h MultiEpochHooks) BeforeEpochStart(ctx context.Context, epochIdentifier string, epochNumber int64) error { + var errs error + for i := range h { + errs = errors.Join(errs, h[i].BeforeEpochStart(ctx, epochIdentifier, epochNumber)) + } + + if errs != nil { + return storyerr.Wrap(errs, "before epoch start") + } + + return nil +} + +// EpochHooksWrapper is a wrapper for modules to inject EpochHooks using depinject. +type EpochHooksWrapper struct{ EpochHooks } + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (EpochHooksWrapper) IsOnePerModuleType() {} diff --git a/client/x/epochs/types/hooks_test.go b/client/x/epochs/types/hooks_test.go new file mode 100644 index 00000000..04804acc --- /dev/null +++ b/client/x/epochs/types/hooks_test.go @@ -0,0 +1,121 @@ +package types_test + +import ( + "context" + "testing" + + "cosmossdk.io/errors" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/suite" + + "github.com/piplabs/story/client/x/epochs/types" +) + +type KeeperTestSuite struct { + suite.Suite + Ctx sdk.Context +} + +func TestKeeperTestSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(KeeperTestSuite)) +} + +func (s *KeeperTestSuite) SetupTest() { + s.Ctx = testutil.DefaultContext(storetypes.NewKVStoreKey(types.StoreKey), storetypes.NewTransientStoreKey("transient_test")) +} + +var errDummy = errors.New("9", 9, "dummyError") + +// dummyEpochHook is a struct satisfying the epoch hook interface, +// that maintains a counter for how many times its been successfully called, +// and a boolean for whether it should panic during its execution. +type dummyEpochHook struct { + successCounter int + shouldError bool +} + +// GetModuleName implements types.EpochHooks. 
+func (*dummyEpochHook) GetModuleName() string { + return "dummy" +} + +func (hook *dummyEpochHook) AfterEpochEnd(ctx context.Context, epochIdentifier string, epochNumber int64) error { + if hook.shouldError { + return errDummy + } + hook.successCounter += 1 + + return nil +} + +func (hook *dummyEpochHook) BeforeEpochStart(ctx context.Context, epochIdentifier string, epochNumber int64) error { + if hook.shouldError { + return errDummy + } + hook.successCounter += 1 + + return nil +} + +func (hook *dummyEpochHook) Clone() *dummyEpochHook { + newHook := dummyEpochHook{successCounter: hook.successCounter, shouldError: hook.shouldError} + return &newHook +} + +var _ types.EpochHooks = &dummyEpochHook{} + +func (s *KeeperTestSuite) TestHooksPanicRecovery() { + errorHook := dummyEpochHook{shouldError: true} + noErrorHook := dummyEpochHook{shouldError: false} + simpleHooks := []dummyEpochHook{errorHook, noErrorHook} + + tests := []struct { + hooks []dummyEpochHook + expectedCounterValues []int + lenEvents int + expErr bool + }{ + {[]dummyEpochHook{errorHook}, []int{0}, 0, true}, + {simpleHooks, []int{0, 1, 0, 1}, 2, true}, + } + + for tcIndex, tc := range tests { + for epochActionSelector := range 2 { + s.SetupTest() + hookRefs := []types.EpochHooks{} + + for _, hook := range tc.hooks { + hookRefs = append(hookRefs, hook.Clone()) + } + + hooks := types.NewMultiEpochHooks(hookRefs...) + + //nolint:nestif // Cosmos style + if epochActionSelector == 0 { + err := hooks.BeforeEpochStart(s.Ctx, "id", 0) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + } else if epochActionSelector == 1 { + err := hooks.AfterEpochEnd(s.Ctx, "id", 0) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + } + + for i := range hooks { + epochHook, ok := hookRefs[i].(*dummyEpochHook) + s.Require().True(ok) + s.Require().Equal(tc.expectedCounterValues[i], epochHook.successCounter, "test case index %d", tcIndex) + } + } + } +} diff --git a/client/x/epochs/types/identifier.go b/client/x/epochs/types/identifier.go new file mode 100644 index 00000000..2e7ebd31 --- /dev/null +++ b/client/x/epochs/types/identifier.go @@ -0,0 +1,22 @@ +package types + +import ( + "fmt" +) + +func ValidateEpochIdentifierInterface(i any) error { + v, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + return ValidateEpochIdentifierString(v) +} + +func ValidateEpochIdentifierString(s string) error { + if s == "" { + return fmt.Errorf("empty distribution epoch identifier: %+v", s) + } + + return nil +} diff --git a/client/x/epochs/types/keys.go b/client/x/epochs/types/keys.go new file mode 100644 index 00000000..27d7a724 --- /dev/null +++ b/client/x/epochs/types/keys.go @@ -0,0 +1,16 @@ +package types + +import ( + "cosmossdk.io/collections" +) + +const ( + // ModuleName defines the module name. + ModuleName = "epochs" + + // StoreKey defines the primary module store key. + StoreKey = ModuleName +) + +// KeyPrefixEpoch defines prefix key for storing epochs. +var KeyPrefixEpoch = collections.NewPrefix(1) diff --git a/client/x/epochs/types/query.pb.go b/client/x/epochs/types/query.pb.go new file mode 100644 index 00000000..29e534b2 --- /dev/null +++ b/client/x/epochs/types/query.pb.go @@ -0,0 +1,908 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: client/x/epochs/types/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryEpochsInfoRequest struct { +} + +func (m *QueryEpochsInfoRequest) Reset() { *m = QueryEpochsInfoRequest{} } +func (m *QueryEpochsInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryEpochsInfoRequest) ProtoMessage() {} +func (*QueryEpochsInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_78cd7c4fa831b33b, []int{0} +} +func (m *QueryEpochsInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoRequest.Merge(m, src) +} +func (m *QueryEpochsInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoRequest proto.InternalMessageInfo + +type QueryEpochsInfoResponse struct { + Epochs []EpochInfo `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs"` +} + +func (m *QueryEpochsInfoResponse) Reset() { *m = QueryEpochsInfoResponse{} } +func (m *QueryEpochsInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryEpochsInfoResponse) ProtoMessage() {} +func (*QueryEpochsInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_78cd7c4fa831b33b, []int{1} +} +func (m *QueryEpochsInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryEpochsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryEpochsInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryEpochsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryEpochsInfoResponse.Merge(m, src) +} +func (m *QueryEpochsInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryEpochsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryEpochsInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryEpochsInfoResponse proto.InternalMessageInfo + +func (m *QueryEpochsInfoResponse) GetEpochs() []EpochInfo { + if m != nil { + return m.Epochs + } + return nil +} + +type QueryCurrentEpochRequest struct { + Identifier 
string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` +} + +func (m *QueryCurrentEpochRequest) Reset() { *m = QueryCurrentEpochRequest{} } +func (m *QueryCurrentEpochRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCurrentEpochRequest) ProtoMessage() {} +func (*QueryCurrentEpochRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_78cd7c4fa831b33b, []int{2} +} +func (m *QueryCurrentEpochRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCurrentEpochRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCurrentEpochRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCurrentEpochRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCurrentEpochRequest.Merge(m, src) +} +func (m *QueryCurrentEpochRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCurrentEpochRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCurrentEpochRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCurrentEpochRequest proto.InternalMessageInfo + +func (m *QueryCurrentEpochRequest) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +type QueryCurrentEpochResponse struct { + CurrentEpoch int64 `protobuf:"varint,1,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` +} + +func (m *QueryCurrentEpochResponse) Reset() { *m = QueryCurrentEpochResponse{} } +func (m *QueryCurrentEpochResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCurrentEpochResponse) ProtoMessage() {} +func (*QueryCurrentEpochResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_78cd7c4fa831b33b, []int{3} +} +func (m *QueryCurrentEpochResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCurrentEpochResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCurrentEpochResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCurrentEpochResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCurrentEpochResponse.Merge(m, src) +} +func (m *QueryCurrentEpochResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCurrentEpochResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCurrentEpochResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCurrentEpochResponse proto.InternalMessageInfo + +func (m *QueryCurrentEpochResponse) GetCurrentEpoch() int64 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func init() { + proto.RegisterType((*QueryEpochsInfoRequest)(nil), "client.x.epochs.types.QueryEpochsInfoRequest") + proto.RegisterType((*QueryEpochsInfoResponse)(nil), "client.x.epochs.types.QueryEpochsInfoResponse") + proto.RegisterType((*QueryCurrentEpochRequest)(nil), "client.x.epochs.types.QueryCurrentEpochRequest") + proto.RegisterType((*QueryCurrentEpochResponse)(nil), "client.x.epochs.types.QueryCurrentEpochResponse") +} + +func init() { proto.RegisterFile("client/x/epochs/types/query.proto", fileDescriptor_78cd7c4fa831b33b) } + +var fileDescriptor_78cd7c4fa831b33b = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 
0x92, 0x31, 0x4b, 0x3b, 0x31, + 0x18, 0xc6, 0x2f, 0xed, 0xff, 0x5f, 0x30, 0xd6, 0x25, 0x58, 0x3d, 0x0f, 0x49, 0xaf, 0xd7, 0xc1, + 0x2e, 0x26, 0x5a, 0x37, 0x07, 0x91, 0x8a, 0x83, 0xa3, 0xb7, 0xe9, 0x22, 0xf5, 0x4c, 0xcf, 0x40, + 0x49, 0xae, 0x97, 0x54, 0xda, 0xd5, 0x5d, 0x10, 0xdc, 0xfc, 0x1c, 0x7e, 0x88, 0x8e, 0x05, 0x17, + 0x27, 0x91, 0xd6, 0x0f, 0x22, 0xcd, 0x05, 0xa9, 0xf4, 0x94, 0x6e, 0x21, 0xf9, 0x3d, 0xef, 0xf3, + 0xe4, 0x7d, 0x5f, 0x58, 0x8b, 0xba, 0x9c, 0x09, 0x4d, 0x07, 0x94, 0x25, 0x32, 0xba, 0x55, 0x54, + 0x0f, 0x13, 0xa6, 0x68, 0xaf, 0xcf, 0xd2, 0x21, 0x49, 0x52, 0xa9, 0x25, 0xaa, 0x64, 0x08, 0x19, + 0x90, 0x0c, 0x21, 0x06, 0xf1, 0xd6, 0x63, 0x19, 0x4b, 0x43, 0xd0, 0xd9, 0x29, 0x83, 0xbd, 0xed, + 0x58, 0xca, 0xb8, 0xcb, 0x68, 0x3b, 0xe1, 0xb4, 0x2d, 0x84, 0xd4, 0x6d, 0xcd, 0xa5, 0x50, 0xf6, + 0xb5, 0x9e, 0xef, 0x16, 0x33, 0xc1, 0x14, 0xb7, 0x50, 0xe0, 0xc2, 0x8d, 0xf3, 0x99, 0xfd, 0xa9, + 0x41, 0xce, 0x44, 0x47, 0x86, 0xac, 0xd7, 0x67, 0x4a, 0x07, 0x17, 0x70, 0x73, 0xe1, 0x45, 0x25, + 0x52, 0x28, 0x86, 0x8e, 0x60, 0x29, 0x2b, 0xe9, 0x02, 0xbf, 0xd8, 0x58, 0x6d, 0xfa, 0x24, 0x37, + 0x35, 0x31, 0xd2, 0x99, 0xb2, 0xf5, 0x6f, 0xf4, 0x5e, 0x75, 0x42, 0xab, 0x0a, 0x0e, 0xa1, 0x6b, + 0x4a, 0x9f, 0xf4, 0xd3, 0x94, 0x09, 0x6d, 0x30, 0x6b, 0x8b, 0x30, 0x84, 0xfc, 0x86, 0x09, 0xcd, + 0x3b, 0x9c, 0xa5, 0x2e, 0xf0, 0x41, 0x63, 0x25, 0x9c, 0xbb, 0x09, 0x8e, 0xe1, 0x56, 0x8e, 0xd6, + 0x06, 0xab, 0xc3, 0xb5, 0x28, 0xbb, 0xbf, 0x32, 0x56, 0x46, 0x5f, 0x0c, 0xcb, 0xd1, 0x1c, 0xdc, + 0x7c, 0x29, 0xc0, 0xff, 0xa6, 0x04, 0x7a, 0x00, 0x10, 0x7e, 0x67, 0x54, 0x68, 0xf7, 0x97, 0x6f, + 0xe4, 0x37, 0xc8, 0x23, 0xcb, 0xe2, 0x59, 0xb8, 0xc0, 0xbf, 0x7f, 0xfd, 0x7c, 0x2a, 0x78, 0xc8, + 0xa5, 0x76, 0x30, 0x76, 0x2c, 0x77, 0xfb, 0xf6, 0x84, 0x9e, 0x01, 0x2c, 0xcf, 0xff, 0x0b, 0xd1, + 0xbf, 0x2c, 0x72, 0xba, 0xe7, 0xed, 0x2d, 0x2f, 0xb0, 0xa9, 0x76, 0x4c, 0xaa, 0x1a, 0xaa, 0x2e, + 0xa6, 0xfa, 0xd1, 0xca, 0x16, 0x1d, 0x4d, 0x30, 0x18, 0x4f, 0x30, 0xf8, 0x98, 0x60, 0xf0, 0x38, + 0xc5, 0xce, 0x78, 0x8a, 0x9d, 0xb7, 0x29, 0x76, 0x2e, 0x2b, 0xb9, 0x8b, 0x76, 0x5d, 0x32, 0x1b, + 0x76, 0xf0, 0x15, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x87, 0x4b, 0x27, 0xf6, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // EpochInfos provide running epochInfos + EpochInfos(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) + // CurrentEpoch provide current epoch of specified identifier + CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) EpochInfos(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) { + out := new(QueryEpochsInfoResponse) + err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/EpochInfos", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) { + out := new(QueryCurrentEpochResponse) + err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/CurrentEpoch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // EpochInfos provide running epochInfos + EpochInfos(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) + // CurrentEpoch provide current epoch of specified identifier + CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) EpochInfos(ctx context.Context, req *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EpochInfos not implemented") +} +func (*UnimplementedQueryServer) CurrentEpoch(ctx context.Context, req *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CurrentEpoch not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_EpochInfos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryEpochsInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).EpochInfos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/client.x.epochs.types.Query/EpochInfos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).EpochInfos(ctx, req.(*QueryEpochsInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_CurrentEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCurrentEpochRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).CurrentEpoch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/client.x.epochs.types.Query/CurrentEpoch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).CurrentEpoch(ctx, req.(*QueryCurrentEpochRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "client.x.epochs.types.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EpochInfos", + Handler: _Query_EpochInfos_Handler, + }, + { + MethodName: "CurrentEpoch", + Handler: _Query_CurrentEpoch_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "client/x/epochs/types/query.proto", +} + +func (m *QueryEpochsInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryEpochsInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryEpochsInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCurrentEpochRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCurrentEpochRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryCurrentEpochResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCurrentEpochResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCurrentEpochResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentEpoch != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryEpochsInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryEpochsInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryCurrentEpochRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryCurrentEpochResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CurrentEpoch != 0 { + n += 1 + sovQuery(uint64(m.CurrentEpoch)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryEpochsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, EpochInfo{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCurrentEpochRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCurrentEpochRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCurrentEpochResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCurrentEpochResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCurrentEpochResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + 
iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/epochs/types/query.proto b/client/x/epochs/types/query.proto new file mode 100644 index 00000000..fd4f5916 --- /dev/null +++ b/client/x/epochs/types/query.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; +package client.x.epochs.types; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "client/x/epochs/types/genesis.proto"; + +option go_package = "client/x/epochs/types"; + +// Query defines the gRPC querier service. +service Query { + // EpochInfos provide running epochInfos + rpc EpochInfos(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) { + option (google.api.http).get = "/client/epochs/v1/epochs"; + } + // CurrentEpoch provide current epoch of specified identifier + rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) { + option (google.api.http).get = "/client/epochs/v1/current_epoch"; + } +} + +message QueryEpochsInfoRequest {} +message QueryEpochsInfoResponse { + repeated EpochInfo epochs = 1 [(gogoproto.nullable) = false]; +} + +message QueryCurrentEpochRequest { + string identifier = 1; +} +message QueryCurrentEpochResponse { + int64 current_epoch = 1; +} \ No newline at end of file diff --git a/go.mod b/go.mod index 14816d3b..3c36fe7a 100644 --- a/go.mod +++ b/go.mod @@ -233,7 +233,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/sys v0.23.0 // indirect diff --git a/lib/netconf/local/genesis.json b/lib/netconf/local/genesis.json index 17e9ef01..22c1e0a0 100644 --- a/lib/netconf/local/genesis.json +++ b/lib/netconf/local/genesis.json @@ -191,6 +191,19 @@ "goal_bonded": "0.670000000000000000", "blocks_per_year": "6311520" } + }, + "epochs": { + "epochs": [ + { + "identifier": "minute", + "start_time": "1970-01-01T00:00:00Z", + "duration": "60s", + "current_epoch": "0", + "current_epoch_start_time": "1970-01-01T00:00:00Z", + "epoch_counting_started": false, + "current_epoch_start_height": "10" + } + ] } } } From 2c55d0d85b389d537fd0eba534c566aa2f15df73 Mon Sep 17 00:00:00 2001 From: Narangde Date: Wed, 18 Sep 2024 09:29:16 +0900 Subject: [PATCH 02/29] chore: modify path for mockgen (#121) --- .../testutil/expected_keepers_mocks.go | 4 +- .../testutil/expected_keepers_mocks.go | 42 +++---------------- scripts/mockgen.sh | 4 +- 3 files changed, 10 insertions(+), 40 deletions(-) diff --git a/client/x/evmengine/testutil/expected_keepers_mocks.go b/client/x/evmengine/testutil/expected_keepers_mocks.go index 55352c99..3d40b6cc 100644 --- a/client/x/evmengine/testutil/expected_keepers_mocks.go +++ b/client/x/evmengine/testutil/expected_keepers_mocks.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. 
-// Source: client/x/evmengine/types/expected_keepers.go +// Source: ../client/x/evmengine/types/expected_keepers.go // // Generated by this command: // -// mockgen -source=client/x/evmengine/types/expected_keepers.go -package testutil -destination client/x/evmengine/testutil/expected_keepers_mocks.go +// mockgen -source=../client/x/evmengine/types/expected_keepers.go -package testutil -destination ../client/x/evmengine/testutil/expected_keepers_mocks.go // // Package testutil is a generated GoMock package. diff --git a/client/x/evmstaking/testutil/expected_keepers_mocks.go b/client/x/evmstaking/testutil/expected_keepers_mocks.go index ab85d653..cae3d6d8 100644 --- a/client/x/evmstaking/testutil/expected_keepers_mocks.go +++ b/client/x/evmstaking/testutil/expected_keepers_mocks.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: client/x/evmstaking/types/expected_keepers.go +// Source: ../client/x/evmstaking/types/expected_keepers.go // // Generated by this command: // -// mockgen -source=client/x/evmstaking/types/expected_keepers.go -package testutil -destination client/x/evmstaking/testutil/expected_keepers_mocks.go +// mockgen -source=../client/x/evmstaking/types/expected_keepers.go -package testutil -destination ../client/x/evmstaking/testutil/expected_keepers_mocks.go // // Package testutil is a generated GoMock package. @@ -155,15 +155,15 @@ func (mr *MockAccountKeeperMockRecorder) SetAccount(ctx, acc any) *gomock.Call { } // SetModuleAccount mocks base method. -func (m *MockAccountKeeper) SetModuleAccount(arg0 context.Context, arg1 types0.ModuleAccountI) { +func (m *MockAccountKeeper) SetModuleAccount(ctx context.Context, modAcc types0.ModuleAccountI) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetModuleAccount", arg0, arg1) + m.ctrl.Call(m, "SetModuleAccount", ctx, modAcc) } // SetModuleAccount indicates an expected call of SetModuleAccount. -func (mr *MockAccountKeeperMockRecorder) SetModuleAccount(arg0, arg1 any) *gomock.Call { +func (mr *MockAccountKeeperMockRecorder) SetModuleAccount(ctx, modAcc any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).SetModuleAccount), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).SetModuleAccount), ctx, modAcc) } // MockBankKeeper is a mock of BankKeeper interface. @@ -395,21 +395,6 @@ func (mr *MockStakingKeeperMockRecorder) BondDenom(ctx any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BondDenom", reflect.TypeOf((*MockStakingKeeper)(nil).BondDenom), ctx) } -// CompleteRedelegation mocks base method. -func (m *MockStakingKeeper) CompleteRedelegation(ctx context.Context, delAddr types0.AccAddress, valSrcAddr, valDstAddr types0.ValAddress) (types0.Coins, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteRedelegation", ctx, delAddr, valSrcAddr, valDstAddr) - ret0, _ := ret[0].(types0.Coins) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteRedelegation indicates an expected call of CompleteRedelegation. -func (mr *MockStakingKeeperMockRecorder) CompleteRedelegation(ctx, delAddr, valSrcAddr, valDstAddr any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteRedelegation", reflect.TypeOf((*MockStakingKeeper)(nil).CompleteRedelegation), ctx, delAddr, valSrcAddr, valDstAddr) -} - // DeleteUnbondingIndex mocks base method. 
func (m *MockStakingKeeper) DeleteUnbondingIndex(ctx context.Context, id uint64) error { m.ctrl.T.Helper() @@ -424,21 +409,6 @@ func (mr *MockStakingKeeperMockRecorder) DeleteUnbondingIndex(ctx, id any) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnbondingIndex", reflect.TypeOf((*MockStakingKeeper)(nil).DeleteUnbondingIndex), ctx, id) } -// DequeueAllMatureRedelegationQueue mocks base method. -func (m *MockStakingKeeper) DequeueAllMatureRedelegationQueue(ctx context.Context, currTime time.Time) ([]types2.DVVTriplet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DequeueAllMatureRedelegationQueue", ctx, currTime) - ret0, _ := ret[0].([]types2.DVVTriplet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DequeueAllMatureRedelegationQueue indicates an expected call of DequeueAllMatureRedelegationQueue. -func (mr *MockStakingKeeperMockRecorder) DequeueAllMatureRedelegationQueue(ctx, currTime any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DequeueAllMatureRedelegationQueue", reflect.TypeOf((*MockStakingKeeper)(nil).DequeueAllMatureRedelegationQueue), ctx, currTime) -} - // EndBlocker mocks base method. func (m *MockStakingKeeper) EndBlocker(ctx context.Context) ([]types.ValidatorUpdate, error) { m.ctrl.T.Helper() diff --git a/scripts/mockgen.sh b/scripts/mockgen.sh index bd9ddab0..b5ca0c94 100644 --- a/scripts/mockgen.sh +++ b/scripts/mockgen.sh @@ -3,5 +3,5 @@ mockgen_cmd="mockgen" $mockgen_cmd -package mock -destination testutil/mock/grpc_server.go github.com/cosmos/gogoproto/grpc Server $mockgen_cmd -package mock -destination testutil/mock/logger.go cosmossdk.io/log Logger -$mockgen_cmd -source=client/x/evmengine/types/expected_keepers.go -package testutil -destination client/x/evmengine/testutil/expected_keepers_mocks.go -$mockgen_cmd -source=client/x/evmstaking/types/expected_keepers.go -package testutil -destination client/x/evmstaking/testutil/expected_keepers_mocks.go +$mockgen_cmd -source=../client/x/evmengine/types/expected_keepers.go -package testutil -destination ../client/x/evmengine/testutil/expected_keepers_mocks.go +$mockgen_cmd -source=../client/x/evmstaking/types/expected_keepers.go -package testutil -destination ../client/x/evmstaking/testutil/expected_keepers_mocks.go From 6858cd45a7b15cedbff54335669ea078d65a0323 Mon Sep 17 00:00:00 2001 From: Narangde Date: Wed, 18 Sep 2024 11:19:25 +0900 Subject: [PATCH 03/29] chore(evmstaking): remove unnecessary codes (#125) --- client/x/evmstaking/keeper/abci.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/client/x/evmstaking/keeper/abci.go b/client/x/evmstaking/keeper/abci.go index da647b45..348e747a 100644 --- a/client/x/evmstaking/keeper/abci.go +++ b/client/x/evmstaking/keeper/abci.go @@ -38,12 +38,6 @@ func (k *Keeper) EndBlock(ctx context.Context) (abci.ValidatorUpdates, error) { return nil, err } - delegations, err := k.stakingKeeper.GetAllDelegations(ctx) - if err != nil { - return nil, err - } - log.Debug(ctx, "All delegations", "count", len(delegations)) - // make an array with each entry being the validator address, delegator address, and the amount var unbondedEntries []UnbondedEntry From eef53b02e93ed72653e8aee0af59fbbe238e855f Mon Sep 17 00:00:00 2001 From: zsystm Date: Wed, 11 Sep 2024 14:43:29 +0900 Subject: [PATCH 04/29] add test cases for evmstaking/types/params increased test coverage to 100% changes validate functions use concrete type instead of any. there is no reasoning for using any. 
rename withdraw test suite to avoid name conflict with param test suite --- client/x/evmstaking/types/params.go | 29 +-- client/x/evmstaking/types/params_test.go | 235 +++++++++++++++++++++ client/x/evmstaking/types/withdraw_test.go | 2 +- 3 files changed, 243 insertions(+), 23 deletions(-) create mode 100644 client/x/evmstaking/types/params_test.go diff --git a/client/x/evmstaking/types/params.go b/client/x/evmstaking/types/params.go index 30191e5c..b9a8e6ae 100644 --- a/client/x/evmstaking/types/params.go +++ b/client/x/evmstaking/types/params.go @@ -55,12 +55,7 @@ func UnmarshalParams(cdc *codec.LegacyAmino, value []byte) (params Params, err e return params, nil } -func ValidateMaxWithdrawalPerBlock(i any) error { - v, ok := i.(uint32) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - +func ValidateMaxWithdrawalPerBlock(v uint32) error { if v == 0 { return fmt.Errorf("max withdrawal per block must be positive: %d", v) } @@ -68,29 +63,19 @@ func ValidateMaxWithdrawalPerBlock(i any) error { return nil } -func ValidateMaxSweepPerBlock(i any, maxWithdrawalPerBlock uint32) error { - v, ok := i.(uint32) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) +func ValidateMaxSweepPerBlock(maxSweepPerBlock uint32, maxWithdrawalPerBlock uint32) error { + if maxSweepPerBlock == 0 { + return fmt.Errorf("max sweep per block must be positive: %d", maxSweepPerBlock) } - if v == 0 { - return fmt.Errorf("max sweep per block must be positive: %d", v) - } - - if v < maxWithdrawalPerBlock { - return fmt.Errorf("max sweep per block must be greater than or equal to max withdrawal per block: %d < %d", v, maxWithdrawalPerBlock) + if maxSweepPerBlock < maxWithdrawalPerBlock { + return fmt.Errorf("max sweep per block must be greater than or equal to max withdrawal per block: %d < %d", maxSweepPerBlock, maxWithdrawalPerBlock) } return nil } -func ValidateMinPartialWithdrawalAmount(i any) error { - v, ok := i.(uint64) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - +func ValidateMinPartialWithdrawalAmount(v uint64) error { if v == 0 { return fmt.Errorf("min partial withdrawal amount must be positive: %d", v) } diff --git a/client/x/evmstaking/types/params_test.go b/client/x/evmstaking/types/params_test.go new file mode 100644 index 00000000..dab1c8cd --- /dev/null +++ b/client/x/evmstaking/types/params_test.go @@ -0,0 +1,235 @@ +package types_test + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/stretchr/testify/suite" + + "github.com/piplabs/story/client/x/evmstaking/types" +) + +type ParamsTestSuite struct { + suite.Suite + encConf testutil.TestEncodingConfig +} + +func (suite *ParamsTestSuite) SetupTest() { + suite.encConf = testutil.MakeTestEncodingConfig() +} + +func (suite *ParamsTestSuite) TestNewParams() { + require := suite.Require() + maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount := uint32(1), uint32(2), uint64(3) + params := types.NewParams(maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount) + // check values are set correctly + require.Equal(maxWithdrawalPerBlock, params.MaxWithdrawalPerBlock) + require.Equal(maxSweepPerBlock, params.MaxSweepPerBlock) + require.Equal(minPartialWithdrawalAmount, params.MinPartialWithdrawalAmount) +} + +func (suite *ParamsTestSuite) TestDefaultParams() { + require := suite.Require() + params := types.DefaultParams() + // check values are set correctly + require.Equal(types.DefaultMaxWithdrawalPerBlock, params.MaxWithdrawalPerBlock) + 
require.Equal(types.DefaultMaxSweepPerBlock, params.MaxSweepPerBlock) + require.Equal(types.DefaultMinPartialWithdrawalAmount, params.MinPartialWithdrawalAmount) +} + +func (suite *ParamsTestSuite) TestMustUnmarshalParams() { + require := suite.Require() + maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount := uint32(1), uint32(2), uint64(3) + params := types.NewParams(maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount) + + tcs := []struct { + name string + input []byte + expected types.Params + expectPanic bool + }{ + { + name: "Unmarshal valid params bytes", + input: suite.encConf.Codec.MustMarshal(¶ms), + expected: types.Params{ + MaxWithdrawalPerBlock: maxWithdrawalPerBlock, + MaxSweepPerBlock: maxSweepPerBlock, + MinPartialWithdrawalAmount: minPartialWithdrawalAmount, + }, + }, + { + name: "Unmarshal invalid params bytes", + input: []byte{0x1, 0x2, 0x3}, + expectPanic: true, + }, + } + + for _, tc := range tcs { + suite.Run(tc.name, func() { + if tc.expectPanic { + require.Panics(func() { + types.MustUnmarshalParams(suite.encConf.Amino, tc.input) + }) + } else { + params := types.MustUnmarshalParams(suite.encConf.Amino, tc.input) + require.Equal(tc.expected, params) + } + }) + } +} + +func (suite *ParamsTestSuite) TestUnmarshalParams() { + require := suite.Require() + maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount := uint32(1), uint32(2), uint64(3) + params := types.NewParams(maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount) + + tcs := []struct { + name string + input []byte + expected types.Params + expectedError string + }{ + { + name: "Unmarshal valid params bytes", + input: suite.encConf.Codec.MustMarshal(¶ms), + expected: types.Params{ + MaxWithdrawalPerBlock: maxWithdrawalPerBlock, + MaxSweepPerBlock: maxSweepPerBlock, + MinPartialWithdrawalAmount: minPartialWithdrawalAmount, + }, + }, + { + name: "Unmarshal invalid params bytes", + input: []byte{0x1, 0x2, 0x3}, + expectedError: "unmarshal params", + }, + } + + for _, tc := range tcs { + suite.Run(tc.name, func() { + params, err := types.UnmarshalParams(suite.encConf.Amino, tc.input) + if tc.expectedError != "" { + require.Error(err) + require.Contains(err.Error(), tc.expectedError) + } else { + require.NoError(err) + require.Equal(tc.expected, params) + } + }) + } +} + +func (suite *ParamsTestSuite) TestValidateMaxWithdrawalPerBlock() { + require := suite.Require() + + tcs := []struct { + name string + input uint32 + expectedErr string + }{ + { + name: "valid value", + input: 1, + }, + { + name: "invalid value", + input: 0, + expectedErr: "max withdrawal per block must be positive: 0", + }, + } + + for _, tc := range tcs { + suite.Run(tc.name, func() { + err := types.ValidateMaxWithdrawalPerBlock(tc.input) + if tc.expectedErr == "" { + require.NoError(err) + } else { + require.Error(err) + require.Contains(err.Error(), tc.expectedErr) + } + }) + } +} + +func (suite *ParamsTestSuite) TestValidateMaxSweepPerBlock() { + require := suite.Require() + + tcs := []struct { + name string + maxSweepPerBlock uint32 + maxWithdrawalPerBlock uint32 + expectedErr string + }{ + { + name: "valid value", + maxSweepPerBlock: 2, + maxWithdrawalPerBlock: 1, + }, + { + name: "valid value", + maxSweepPerBlock: 1, + maxWithdrawalPerBlock: 1, + }, + { + name: "invalid value", + maxSweepPerBlock: 0, + maxWithdrawalPerBlock: 2, + expectedErr: "max sweep per block must be positive: 0", + }, + { + name: "invalid value", + maxSweepPerBlock: 1, + maxWithdrawalPerBlock: 2, + expectedErr: 
"max sweep per block must be greater than or equal to max withdrawal per block", + }, + } + + for _, tc := range tcs { + suite.Run(tc.name, func() { + err := types.ValidateMaxSweepPerBlock(tc.maxSweepPerBlock, tc.maxWithdrawalPerBlock) + if tc.expectedErr == "" { + require.NoError(err) + } else { + require.Error(err) + require.Contains(err.Error(), tc.expectedErr) + } + }) + } +} + +func (suite *ParamsTestSuite) TestValidateMinPartialWithdrawatAmount() { + require := suite.Require() + + tcs := []struct { + name string + input uint64 + expectedErr string + }{ + { + name: "valid value", + input: 1, + }, + { + name: "invalid value", + input: 0, + expectedErr: "min partial withdrawal amount must be positive: 0", + }, + } + + for _, tc := range tcs { + suite.Run(tc.name, func() { + err := types.ValidateMinPartialWithdrawalAmount(tc.input) + if tc.expectedErr == "" { + require.NoError(err) + } else { + require.Error(err) + require.Contains(err.Error(), tc.expectedErr) + } + }) + } +} + +func TestParamsTestSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(ParamsTestSuite)) +} diff --git a/client/x/evmstaking/types/withdraw_test.go b/client/x/evmstaking/types/withdraw_test.go index 16b15cf3..db181212 100644 --- a/client/x/evmstaking/types/withdraw_test.go +++ b/client/x/evmstaking/types/withdraw_test.go @@ -196,7 +196,7 @@ func (suite *WithdrawTestSuite) TestMustUnmarshalWithdraw() { } } -func TestTestSuite(t *testing.T) { +func TestWithdrawalTestSuite(t *testing.T) { t.Parallel() suite.Run(t, new(WithdrawTestSuite)) } From aa719556aa4410ef5edb529203f6ec5ecc2b619f Mon Sep 17 00:00:00 2001 From: zsystm Date: Wed, 11 Sep 2024 15:32:41 +0900 Subject: [PATCH 05/29] add test cases for evmstaking/keeper/unjail increased test coverage to 100% --- client/x/evmstaking/keeper/unjail_test.go | 109 ++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 client/x/evmstaking/keeper/unjail_test.go diff --git a/client/x/evmstaking/keeper/unjail_test.go b/client/x/evmstaking/keeper/unjail_test.go new file mode 100644 index 00000000..e8f01c24 --- /dev/null +++ b/client/x/evmstaking/keeper/unjail_test.go @@ -0,0 +1,109 @@ +package keeper_test + +import ( + "context" + + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/piplabs/story/client/x/evmstaking/types" + "github.com/piplabs/story/contracts/bindings" +) + +func (s *TestSuite) TestProcessUnjail() { + require := s.Require() + ctx, slashingKeeper, keeper := s.Ctx, s.SlashingKeeper, s.EVMStakingKeeper + pubKeys, _, valAddrs := createAddresses(1) + valAddr := valAddrs[0] + valPubKey := pubKeys[0] + + tcs := []struct { + name string + setupMock func(c context.Context) + unjailEv *bindings.IPTokenSlashingUnjail + expectedErr string + }{ + { + name: "pass: valid unjail event", + setupMock: func(c context.Context) { + slashingKeeper.EXPECT().Unjail(c, valAddr).Return(nil) + }, + unjailEv: &bindings.IPTokenSlashingUnjail{ + ValidatorCmpPubkey: valPubKey.Bytes(), + }, + }, + { + name: "fail: invalid validator pubkey", + unjailEv: &bindings.IPTokenSlashingUnjail{ + ValidatorCmpPubkey: valPubKey.Bytes()[10:], + }, + expectedErr: "validator pubkey to cosmos: invalid pubkey length", + }, + { + name: "fail: validator not jailed", + setupMock: func(c context.Context) { + // MOCK Unjail to return error. 
+ slashingKeeper.EXPECT().Unjail(c, valAddr).Return(slashingtypes.ErrValidatorNotJailed) + }, + unjailEv: &bindings.IPTokenSlashingUnjail{ + ValidatorCmpPubkey: valPubKey.Bytes(), + }, + expectedErr: slashingtypes.ErrValidatorNotJailed.Error(), + }, + } + + for _, tc := range tcs { + s.Run(tc.name, func() { + cachedCtx, _ := ctx.CacheContext() + if tc.setupMock != nil { + tc.setupMock(cachedCtx) + } + err := keeper.ProcessUnjail(cachedCtx, tc.unjailEv) + if tc.expectedErr != "" { + require.ErrorContains(err, tc.expectedErr) + } else { + require.NoError(err) + } + }) + } +} + +func (s *TestSuite) TestParseUnjailLog() { + require := s.Require() + keeper := s.EVMStakingKeeper + + dummyEthAddr := common.HexToAddress("0x1") + + tcs := []struct { + name string + log gethtypes.Log + expectErr bool + }{ + { + name: "Unknown Topic", + log: gethtypes.Log{ + Topics: []common.Hash{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")}, + }, + expectErr: true, + }, + { + name: "Valid Topic", + log: gethtypes.Log{ + Topics: []common.Hash{types.UnjailEvent.ID, common.BytesToHash(dummyEthAddr.Bytes())}, + }, + expectErr: false, + }, + } + + for _, tc := range tcs { + s.Run(tc.name, func() { + _, err := keeper.ParseUnjailLog(tc.log) + if tc.expectErr { + require.Error(err, "should return error for %s", tc.name) + } else { + require.NoError(err, "should not return error for %s", tc.name) + } + }) + } +} From 01397dc5d64808684b4f45f3c191e0b21c7b0448 Mon Sep 17 00:00:00 2001 From: zsystm Date: Wed, 11 Sep 2024 22:25:26 +0900 Subject: [PATCH 06/29] add test cases for genesis increased coverage to 71.4% --- client/x/evmstaking/keeper/genesis_test.go | 184 +++++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 client/x/evmstaking/keeper/genesis_test.go diff --git a/client/x/evmstaking/keeper/genesis_test.go b/client/x/evmstaking/keeper/genesis_test.go new file mode 100644 index 00000000..1d544eb6 --- /dev/null +++ b/client/x/evmstaking/keeper/genesis_test.go @@ -0,0 +1,184 @@ +package keeper_test + +import ( + "context" + + "github.com/piplabs/story/client/x/evmstaking/types" + "github.com/piplabs/story/lib/k1util" +) + +func (s *TestSuite) TestInitGenesis() { + require := s.Require() + ctx, keeper := s.Ctx, s.EVMStakingKeeper + + validMaxWithdrawalPerBlock := types.DefaultMaxWithdrawalPerBlock + 100 + validMaxsweepPerBlock := types.DefaultMaxSweepPerBlock + 100 + validMinPartialWithdrawalAmount := types.DefaultMinPartialWithdrawalAmount + 100 + validParams := types.Params{ + MaxWithdrawalPerBlock: validMaxWithdrawalPerBlock, + MaxSweepPerBlock: validMaxsweepPerBlock, + MinPartialWithdrawalAmount: validMinPartialWithdrawalAmount, + } + + // setup addresses and keys for testing + pubKeys, addrs, valAddrs := createAddresses(3) + delAddr := addrs[0] + delPubKey := pubKeys[0] + valAddr1 := valAddrs[1] + valAccAddr1 := addrs[1] + valPubKey1 := pubKeys[1] + valEvmAddr1, err := k1util.CosmosPubkeyToEVMAddress(valPubKey1.Bytes()) + require.NoError(err) + valAddr2 := valAddrs[2] + valAccAddr2 := addrs[2] + valPubKey2 := pubKeys[2] + valEvmAddr2, err := k1util.CosmosPubkeyToEVMAddress(valPubKey2.Bytes()) + require.NoError(err) + + tcs := []struct { + name string + setup func(c context.Context) + gs func() *types.GenesisState + postStateCheck func(c context.Context) + expectedError string + }{ + { + name: "pass: no validators", + gs: func() *types.GenesisState { + return &types.GenesisState{ + Params: validParams, + } + }, + postStateCheck: func(c context.Context) { + 
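+				// Params stored during InitGenesis should be readable back from the keeper.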
params, err := keeper.GetParams(c) + require.NoError(err) + require.Equal(validParams, params) + }, + }, + { + name: "pass: with validators", + setup: func(c context.Context) { + s.setupValidatorAndDelegation(c, valPubKey1, delPubKey, valAddr1, delAddr) + s.setupValidatorAndDelegation(c, valPubKey2, delPubKey, valAddr2, delAddr) + }, + gs: func() *types.GenesisState { + return &types.GenesisState{ + Params: validParams, + } + }, + postStateCheck: func(c context.Context) { + params, err := keeper.GetParams(c) + require.NoError(err) + require.Equal(validParams, params) + + // check delegator map + evmAddr1, err := keeper.DelegatorMap.Get(c, valAccAddr1.String()) + require.NoError(err) + require.Equal(valEvmAddr1.String(), evmAddr1) + evmAddr2, err := keeper.DelegatorMap.Get(c, valAccAddr2.String()) + require.NoError(err) + require.Equal(valEvmAddr2.String(), evmAddr2) + }, + }, + { + name: "fail: invalid params", + gs: func() *types.GenesisState { + invalidParams := validParams + // make params invalid + invalidParams.MaxWithdrawalPerBlock = 0 + + return &types.GenesisState{ + Params: invalidParams, + } + }, + expectedError: "max withdrawal per block must be positive", + }, + } + + for _, tc := range tcs { + s.Run(tc.name, func() { + cachedCtx, _ := ctx.CacheContext() + if tc.setup != nil { + tc.setup(cachedCtx) + } + err := keeper.InitGenesis(cachedCtx, tc.gs()) + if tc.expectedError != "" { + require.Error(err) + require.Contains(err.Error(), tc.expectedError) + } else { + require.NoError(err) + if tc.postStateCheck != nil { + tc.postStateCheck(cachedCtx) + } + } + }) + } +} + +func (s *TestSuite) TestExportGenesis() { + require := s.Require() + ctx, keeper := s.Ctx, s.EVMStakingKeeper + + validMaxWithdrawalPerBlock := types.DefaultMaxWithdrawalPerBlock + 100 + validMaxsweepPerBlock := types.DefaultMaxSweepPerBlock + 100 + validMinPartialWithdrawalAmount := types.DefaultMinPartialWithdrawalAmount + 100 + validParams := types.Params{ + MaxWithdrawalPerBlock: validMaxWithdrawalPerBlock, + MaxSweepPerBlock: validMaxsweepPerBlock, + MinPartialWithdrawalAmount: validMinPartialWithdrawalAmount, + } + + tcs := []struct { + name string + setup func(c context.Context) + expectedGenesis *types.GenesisState + }{ + { + name: "pass: case1", + setup: func(c context.Context) { + cpy := validParams + // modify params to test + cpy.MaxWithdrawalPerBlock += 100 + cpy.MaxSweepPerBlock += 100 + cpy.MinPartialWithdrawalAmount += 100 + require.NoError(keeper.SetParams(c, cpy)) + }, + expectedGenesis: &types.GenesisState{ + Params: types.Params{ + MaxWithdrawalPerBlock: validParams.MaxWithdrawalPerBlock + 100, + MaxSweepPerBlock: validParams.MaxSweepPerBlock + 100, + MinPartialWithdrawalAmount: validParams.MinPartialWithdrawalAmount + 100, + }, + }, + }, + { + name: "pass: case2", + setup: func(c context.Context) { + cpy := validParams + // modify params to test + cpy.MaxWithdrawalPerBlock += 2 + cpy.MaxSweepPerBlock += 2 + cpy.MinPartialWithdrawalAmount += 2 + require.NoError(keeper.SetParams(c, cpy)) + }, + expectedGenesis: &types.GenesisState{ + Params: types.Params{ + MaxWithdrawalPerBlock: validParams.MaxWithdrawalPerBlock + 2, + MaxSweepPerBlock: validParams.MaxSweepPerBlock + 2, + MinPartialWithdrawalAmount: validParams.MinPartialWithdrawalAmount + 2, + }, + }, + }, + } + + for _, tc := range tcs { + s.Run(tc.name, func() { + cachedCtx, _ := ctx.CacheContext() + if tc.setup != nil { + tc.setup(cachedCtx) + } + genesis := keeper.ExportGenesis(cachedCtx) + require.Equal(tc.expectedGenesis, genesis) + }) + } 
+} From ccd58343bcbfcba3d01c833694896d065e091d06 Mon Sep 17 00:00:00 2001 From: zsystm Date: Thu, 19 Sep 2024 15:32:52 +0900 Subject: [PATCH 07/29] fix syntax after rebase --- client/x/evmstaking/keeper/genesis_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/client/x/evmstaking/keeper/genesis_test.go b/client/x/evmstaking/keeper/genesis_test.go index 1d544eb6..03b9fb72 100644 --- a/client/x/evmstaking/keeper/genesis_test.go +++ b/client/x/evmstaking/keeper/genesis_test.go @@ -34,6 +34,7 @@ func (s *TestSuite) TestInitGenesis() { valPubKey2 := pubKeys[2] valEvmAddr2, err := k1util.CosmosPubkeyToEVMAddress(valPubKey2.Bytes()) require.NoError(err) + valTokens := s.StakingKeeper.TokensFromConsensusPower(ctx, 10) tcs := []struct { name string @@ -58,8 +59,8 @@ func (s *TestSuite) TestInitGenesis() { { name: "pass: with validators", setup: func(c context.Context) { - s.setupValidatorAndDelegation(c, valPubKey1, delPubKey, valAddr1, delAddr) - s.setupValidatorAndDelegation(c, valPubKey2, delPubKey, valAddr2, delAddr) + s.setupValidatorAndDelegation(c, valPubKey1, delPubKey, valAddr1, delAddr, valTokens) + s.setupValidatorAndDelegation(c, valPubKey2, delPubKey, valAddr2, delAddr, valTokens) }, gs: func() *types.GenesisState { return &types.GenesisState{ From 4325f158649ae677ca0d60eddbe759e8d8688fb4 Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:40:59 +0900 Subject: [PATCH 08/29] add test cases for genesis (#112) increase coverage to 100% --- client/x/evmstaking/types/genesis_test.go | 65 +++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 client/x/evmstaking/types/genesis_test.go diff --git a/client/x/evmstaking/types/genesis_test.go b/client/x/evmstaking/types/genesis_test.go new file mode 100644 index 00000000..093f0d94 --- /dev/null +++ b/client/x/evmstaking/types/genesis_test.go @@ -0,0 +1,65 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/evmstaking/types" +) + +var zeroVallidatorSweepIndex = &types.ValidatorSweepIndex{ + NextValIndex: 0, + NextValDelIndex: 0, +} + +func TestNewGenesisState(t *testing.T) { + t.Parallel() + tcs := []struct { + name string + params types.Params + expectedGenesisState *types.GenesisState + }{ + { + name: "default params", + params: types.DefaultParams(), + expectedGenesisState: &types.GenesisState{ + Params: types.DefaultParams(), + ValidatorSweepIndex: zeroVallidatorSweepIndex, + }, + }, + { + name: "custom params", + params: types.NewParams( + 10, + 20, + 30, + ), + expectedGenesisState: &types.GenesisState{ + Params: types.NewParams( + 10, + 20, + 30, + ), + ValidatorSweepIndex: zeroVallidatorSweepIndex, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := types.NewGenesisState(tc.params) + require.Equal(t, tc.expectedGenesisState, got) + }) + } +} + +func TestDefaultGenesisState(t *testing.T) { + t.Parallel() + expectedGenesisState := &types.GenesisState{ + Params: types.DefaultParams(), + ValidatorSweepIndex: zeroVallidatorSweepIndex, + } + require.Equal(t, expectedGenesisState, types.DefaultGenesisState()) +} From ed00e1a87b1c635c61a4a6c7449c5956ca99ef13 Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:47:24 +0900 Subject: [PATCH 09/29] fix(evmstaking): query withdrawal queue (#113) it should append QueueElementsPrefixSuffix when query elements of queue 
it fixes https://github.com/piplabs/story/issues/84 --- client/x/evmstaking/keeper/grpc_query.go | 3 +- client/x/evmstaking/keeper/grpc_query_test.go | 63 +++++++++++++++++++ client/x/evmstaking/keeper/keeper_test.go | 5 ++ 3 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 client/x/evmstaking/keeper/grpc_query_test.go diff --git a/client/x/evmstaking/keeper/grpc_query.go b/client/x/evmstaking/keeper/grpc_query.go index 13c942bd..7b1b13f2 100644 --- a/client/x/evmstaking/keeper/grpc_query.go +++ b/client/x/evmstaking/keeper/grpc_query.go @@ -8,6 +8,7 @@ import ( "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/types/query" + "github.com/piplabs/story/client/collections" "github.com/piplabs/story/client/x/evmstaking/types" "google.golang.org/grpc/codes" @@ -37,7 +38,7 @@ func (k Keeper) GetWithdrawalQueue(ctx context.Context, request *types.QueryGetW } store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - wqStore := prefix.NewStore(store, types.WithdrawalQueueKey) // withdrawal queue store + wqStore := prefix.NewStore(store, append(types.WithdrawalQueueKey, collections.QueueElementsPrefixSuffix)) // withdrawal queue store withdrawals, pageResp, err := query.GenericFilteredPaginate(k.cdc, wqStore, request.Pagination, func(_ []byte, wit *types.Withdrawal) (*types.Withdrawal, error) { return wit, nil diff --git a/client/x/evmstaking/keeper/grpc_query_test.go b/client/x/evmstaking/keeper/grpc_query_test.go new file mode 100644 index 00000000..25d15a12 --- /dev/null +++ b/client/x/evmstaking/keeper/grpc_query_test.go @@ -0,0 +1,63 @@ +package keeper_test + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/ethereum/go-ethereum/common" + + "github.com/piplabs/story/client/x/evmstaking/types" +) + +func (s *TestSuite) TestGetWithdrawalQueue() { + require := s.Require() + ctx, keeper, queryClient := s.Ctx, s.EVMStakingKeeper, s.queryClient + require.NoError(keeper.WithdrawalQueue.Initialize(ctx)) + + pageReq := &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: true, + } + req := &types.QueryGetWithdrawalQueueRequest{ + Pagination: pageReq, + } + + // Query an empty queue + res, err := queryClient.GetWithdrawalQueue(context.Background(), req) + require.NoError(err) + require.Equal(0, len(res.Withdrawals), "expected no withdrawals in the queue yet") + + // Prepare and add three withdrawals to the queue + delAddr = "story1hmjw3pvkjtndpg8wqppwdn8udd835qpan4hm0y" + valAddr = "storyvaloper1hmjw3pvkjtndpg8wqppwdn8udd835qpaa6r6y0" + evmAddr = common.HexToAddress("0x131D25EDE18178BAc9275b312001a63C081722d2") + withdrawals := []types.Withdrawal{ + types.NewWithdrawal(1, delAddr, valAddr, evmAddr.String(), 100), + types.NewWithdrawal(2, delAddr, valAddr, evmAddr.String(), 200), + types.NewWithdrawal(3, delAddr, valAddr, evmAddr.String(), 300), + } + require.Len(withdrawals, 3) + for _, w := range withdrawals { + err = keeper.AddWithdrawalToQueue(ctx, w) + require.NoError(err) + } + + // Query the first page of two withdrawals + res, err = queryClient.GetWithdrawalQueue(context.Background(), req) + require.NoError(err) + require.Equal(2, len(res.Withdrawals), + "expected 2 withdrawals after first page query, but found %d", len(res.Withdrawals)) + + // Query the next page for the remaining withdrawal + nextPage := res.Pagination.NextKey + require.NotNil(nextPage, "expected a next page key to be not nil") + + pageReq.Key = nextPage + req = &types.QueryGetWithdrawalQueueRequest{ + Pagination: pageReq, + } + res, err = 
queryClient.GetWithdrawalQueue(context.Background(), req) + require.NoError(err) + require.Equal(1, len(res.Withdrawals), "expected 1 withdrawal after second page query, but found %d", len(res.Withdrawals)) +} diff --git a/client/x/evmstaking/keeper/keeper_test.go b/client/x/evmstaking/keeper/keeper_test.go index 58c1018a..aee98e5b 100644 --- a/client/x/evmstaking/keeper/keeper_test.go +++ b/client/x/evmstaking/keeper/keeper_test.go @@ -15,6 +15,7 @@ import ( "github.com/cometbft/cometbft/crypto" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/address" codectypes "github.com/cosmos/cosmos-sdk/codec/types" @@ -61,6 +62,7 @@ type TestSuite struct { SlashingKeeper *estestutil.MockSlashingKeeper EVMStakingKeeper *keeper.Keeper msgServer types.MsgServiceServer + queryClient types.QueryClient encCfg moduletestutil.TestEncodingConfig } @@ -142,6 +144,9 @@ func (s *TestSuite) SetupTest() { s.Require().NoError(evmstakingKeeper.SetParams(s.Ctx, types.DefaultParams())) s.EVMStakingKeeper = evmstakingKeeper s.msgServer = keeper.NewMsgServerImpl(evmstakingKeeper) + queryHelper := baseapp.NewQueryServerTestHelper(s.Ctx, s.encCfg.InterfaceRegistry) + types.RegisterQueryServer(queryHelper, evmstakingKeeper) + s.queryClient = types.NewQueryClient(queryHelper) } func (s *TestSuite) TestLogger() { From 70adc1e92462b435eff5c33cb98be8c886f17e40 Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Mon, 23 Sep 2024 14:46:33 +0900 Subject: [PATCH 10/29] test(evmengine): add test cases for db (#120) * add test cases to db --- client/x/evmengine/keeper/db_internal_test.go | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 client/x/evmengine/keeper/db_internal_test.go diff --git a/client/x/evmengine/keeper/db_internal_test.go b/client/x/evmengine/keeper/db_internal_test.go new file mode 100644 index 00000000..a34a784d --- /dev/null +++ b/client/x/evmengine/keeper/db_internal_test.go @@ -0,0 +1,98 @@ +package keeper + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + "github.com/ethereum/go-ethereum/beacon/engine" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + moduletestutil "github.com/piplabs/story/client/x/evmengine/testutil" + "github.com/piplabs/story/lib/ethclient/mock" + + "go.uber.org/mock/gomock" +) + +func createTestKeeper(t *testing.T) (sdk.Context, *Keeper) { + t.Helper() + cdc := getCodec(t) + txConfig := authtx.NewTxConfig(cdc, nil) + + mockEngine, err := newMockEngineAPI(0) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + mockClient := mock.NewMockClient(ctrl) + ak := moduletestutil.NewMockAccountKeeper(ctrl) + esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) + uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + ctx, storeService := setupCtxStore(t, nil) + ctx = ctx.WithExecMode(sdk.ExecModeFinalize) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + require.NoError(t, err) + + return ctx, keeper +} + +func TestKeeper_InsertGenesisHead(t *testing.T) { + t.Parallel() + + ctx, keeper := createTestKeeper(t) + + // make sure the execution head does not exist + _, err := keeper.getExecutionHead(ctx) + require.Error(t, err, "execution head should not exist") + + // insert genesis head + dummyBlockHash := []byte("test") + 
err = keeper.InsertGenesisHead(ctx, dummyBlockHash) + require.NoError(t, err) + + // make sure the execution head is set correctly + head, err := keeper.getExecutionHead(ctx) + require.NoError(t, err) + require.NotNil(t, head, "execution head should exist") + require.Equal(t, dummyBlockHash, head.GetBlockHash(), "block hash should match") + + // next try should fail because the genesis head already exists + err = keeper.InsertGenesisHead(ctx, []byte("another hash")) + require.Error(t, err, "genesis head should already exist") +} + +func TestKeeper_updateExecutionHead(t *testing.T) { + t.Parallel() + + ctx, keeper := createTestKeeper(t) + + // make sure the execution head does not exist + _, err := keeper.getExecutionHead(ctx) + require.Error(t, err, "execution head should not exist") + + // insert genesis head + dummyBlockHash := []byte("test") + err = keeper.InsertGenesisHead(ctx, dummyBlockHash) + require.NoError(t, err) + + // make sure the execution head is set correctly + head, err := keeper.getExecutionHead(ctx) + require.NoError(t, err) + require.NotNil(t, head, "execution head should exist") + + // update the execution head + newBlockHash := common.BytesToHash([]byte("new hash")) + err = keeper.updateExecutionHead(ctx, engine.ExecutableData{ + Number: 100, + BlockHash: newBlockHash, + Timestamp: 0, + }) + require.NoError(t, err) + + // make sure the execution head is updated correctly + head, err = keeper.getExecutionHead(ctx) + require.NoError(t, err) + require.NotNil(t, head, "execution head should exist") + require.Equal(t, newBlockHash.Bytes(), head.GetBlockHash(), "block hash should match") + require.Equal(t, uint64(100), head.GetBlockHeight(), "block height should match") +} From fdf12b7244dfdbae06e240387d84fd2052fa49da Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Mon, 23 Sep 2024 14:56:38 +0900 Subject: [PATCH 11/29] test(evmengine): add test cases for helper (#131) * add test cases for helper increased test coverage to 100% * fix ci --- .../evmengine/keeper/helpers_internal_test.go | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 client/x/evmengine/keeper/helpers_internal_test.go diff --git a/client/x/evmengine/keeper/helpers_internal_test.go b/client/x/evmengine/keeper/helpers_internal_test.go new file mode 100644 index 00000000..f92fe363 --- /dev/null +++ b/client/x/evmengine/keeper/helpers_internal_test.go @@ -0,0 +1,71 @@ +package keeper + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRetryForever(t *testing.T) { + t.Parallel() + attempts := 0 + + tests := []struct { + name string + ctxFunc func() context.Context + fn func(ctx context.Context) (bool, error) + expectedErr string + expectedAttempts int + }{ + { + name: "Success after retries", + ctxFunc: context.Background, + fn: func(ctx context.Context) (bool, error) { + attempts++ + if attempts < 3 { + return false, nil // Retry + } + + return true, nil // Success + }, + expectedErr: "", + expectedAttempts: 3, + }, + { + name: "Context canceled", + ctxFunc: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + return ctx + }, + fn: func(ctx context.Context) (bool, error) { + return false, nil + }, + expectedErr: "retry canceled", + }, + { + name: "Func returns error", + ctxFunc: context.Background, + fn: func(ctx context.Context) (bool, error) { + return false, errors.New("some error") + }, + expectedErr: "some error", + }, + } + 
+ for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := retryForever(tc.ctxFunc(), tc.fn) + if tc.expectedErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} From 25583ff917c8154afc042849d7d7ed8236e5c2ac Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:36:05 +0900 Subject: [PATCH 12/29] add test cases for keeper (#132) increased test coverage to 89.7% add ci rule to not use t.Parallel to avoid data race issue for cosmos orm table. --- .golangci.yml | 1 + .../evmengine/keeper/keeper_internal_test.go | 271 ++++++++++++++++-- lib/ethclient/enginemock.go | 8 +- 3 files changed, 245 insertions(+), 35 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e52e16f8..ef08cb01 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -159,3 +159,4 @@ linters: - varnamelen # False positives - wsl # Way to strict and opinionated - lll # Disable rigid line length limit + - tparallel # Disable test parallelization diff --git a/client/x/evmengine/keeper/keeper_internal_test.go b/client/x/evmengine/keeper/keeper_internal_test.go index e06875a6..311c7557 100644 --- a/client/x/evmengine/keeper/keeper_internal_test.go +++ b/client/x/evmengine/keeper/keeper_internal_test.go @@ -2,34 +2,266 @@ package keeper import ( "context" + "encoding/json" "testing" "time" k1 "github.com/cometbft/cometbft/crypto/secp256k1" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" cmttypes "github.com/cometbft/cometbft/types" + sdk "github.com/cosmos/cosmos-sdk/types" authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + "github.com/ethereum/go-ethereum/beacon/engine" + "github.com/ethereum/go-ethereum/common" fuzz "github.com/google/gofuzz" "github.com/stretchr/testify/require" "github.com/piplabs/story/client/comet" moduletestutil "github.com/piplabs/story/client/x/evmengine/testutil" + "github.com/piplabs/story/client/x/evmengine/types" "github.com/piplabs/story/lib/errors" + "github.com/piplabs/story/lib/ethclient" "github.com/piplabs/story/lib/ethclient/mock" "github.com/piplabs/story/lib/k1util" "go.uber.org/mock/gomock" ) -func TestKeeper_isNextProposer(t *testing.T) { +type args struct { + height int64 + validatorsFunc func(context.Context, int64) (*cmttypes.ValidatorSet, bool, error) + current int + next int + header func(height int64, address []byte) cmtproto.Header +} + +func createKeeper(t *testing.T, args args) (sdk.Context, *mockCometAPI, *Keeper) { + t.Helper() + + cdc := getCodec(t) + txConfig := authtx.NewTxConfig(cdc, nil) + mockEngine, err := newMockEngineAPI(0) + require.NoError(t, err) + + cmtAPI := newMockCometAPI(t, args.validatorsFunc) + header := args.header(args.height, cmtAPI.validatorSet.Validators[args.current].Address) + + nxtAddr, err := k1util.PubKeyToAddress(cmtAPI.validatorSet.Validators[args.next].PubKey) + require.NoError(t, err) + + ctrl := gomock.NewController(t) + mockClient := mock.NewMockClient(ctrl) + ak := moduletestutil.NewMockAccountKeeper(ctrl) + esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) + uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + + ctx, storeService := setupCtxStore(t, &header) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + require.NoError(t, err) + keeper.SetCometAPI(cmtAPI) + keeper.SetValidatorAddress(nxtAddr) + populateGenesisHead(ctx, t, keeper) + + return ctx, cmtAPI, keeper +} + +func TestKeeper_SetBuildDelay(t *testing.T) { 
+ t.Parallel() + keeper := new(Keeper) + // check existing value + require.Equal(t, 0*time.Second, keeper.buildDelay) + // set new value + keeper.SetBuildDelay(10 * time.Second) + require.Equal(t, 10*time.Second, keeper.buildDelay) +} + +func TestKeeper_SetBuildOptimistic(t *testing.T) { + t.Parallel() + keeper := new(Keeper) + // check existing value + require.False(t, keeper.buildOptimistic) + // set new value + keeper.SetBuildOptimistic(true) + require.True(t, keeper.buildOptimistic) +} + +func TestKeeper_parseAndVerifyProposedPayload(t *testing.T) { t.Parallel() - type args struct { - height int64 - validatorsFunc func(context.Context, int64) (*cmttypes.ValidatorSet, bool, error) - current int - next int - header func(height int64, address []byte) cmtproto.Header + now := time.Now() + fuzzer := ethclient.NewFuzzer(now.Unix()) + ctx, _, keeper := createKeeper(t, args{ + height: 0, + current: 0, + next: 1, + header: func(height int64, address []byte) cmtproto.Header { + return cmtproto.Header{Height: height, ProposerAddress: address} + }, + }) + + tcs := []struct { + name string + setup func(context.Context) sdk.Context + msg func(context.Context) *types.MsgExecutionPayload + expectedErr string + }{ + { + name: "fail: unmarshal payload because of invalid json", + msg: func(_ context.Context) *types.MsgExecutionPayload { + return &types.MsgExecutionPayload{ExecutionPayload: []byte("invalid")} + }, + expectedErr: "unmarshal payload", + }, + { + name: "fail: payload number is not equal to head block height + 1", + msg: func(_ context.Context) *types.MsgExecutionPayload { + payload, err := ethclient.MakePayload(fuzzer, 100, uint64(now.Unix()), common.Hash{}, common.Address{}, common.Hash{}, &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) + + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + expectedErr: "invalid proposed payload number", + }, + { + name: "fail: payload parent hash is not equal to head hash", + msg: func(c context.Context) *types.MsgExecutionPayload { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + + payload, err := ethclient.MakePayload(fuzzer, execHead.GetBlockHeight()+1, uint64(now.Unix()), common.Hash{}, common.Address{}, common.Hash{}, &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) + + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + expectedErr: "invalid proposed payload parent hash", + }, + { + name: "fail: invalid payload timestamp", + msg: func(c context.Context) *types.MsgExecutionPayload { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + weekAgo := execHead.GetBlockTime() - 604800 + + payload, err := ethclient.MakePayload(fuzzer, 1, weekAgo, execHead.Hash(), common.Address{}, common.Hash{}, &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) + + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + expectedErr: "invalid payload timestamp", + }, + { + name: "fail: invalid payload random", + msg: func(c context.Context) *types.MsgExecutionPayload { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + + payload, err := ethclient.MakePayload(fuzzer, execHead.GetBlockHeight()+1, uint64(now.Unix()), execHead.Hash(), common.Address{}, common.Hash{}, &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) 
+ + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + expectedErr: "invalid payload random", + }, + { + name: "pass: valid payload", + msg: func(c context.Context) *types.MsgExecutionPayload { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + + payload, err := ethclient.MakePayload(fuzzer, execHead.GetBlockHeight()+1, uint64(now.Unix()), execHead.Hash(), common.Address{}, execHead.Hash(), &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) + + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + }, + { + name: "pass: valid payload when consensus block time is less than execution block time", + setup: func(c context.Context) sdk.Context { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + // update execution head with current block time + err = keeper.updateExecutionHead(c, engine.ExecutableData{ + Number: execHead.GetBlockHeight(), + BlockHash: common.BytesToHash(execHead.GetBlockHash()), + Timestamp: uint64(now.Unix()), + }) + require.NoError(t, err) + + // set block time to be less than execution block time + sdkCtx := sdk.UnwrapSDKContext(c) + sdkCtx = sdkCtx.WithBlockTime(now.Add(-24 * time.Hour)) + + return sdkCtx + }, + msg: func(c context.Context) *types.MsgExecutionPayload { + execHead, err := keeper.getExecutionHead(c) + require.NoError(t, err) + + payload, err := ethclient.MakePayload(fuzzer, execHead.GetBlockHeight()+1, execHead.GetBlockTime()+1, execHead.Hash(), common.Address{}, execHead.Hash(), &common.Hash{}) + require.NoError(t, err) + + marshaled, err := json.Marshal(payload) + require.NoError(t, err) + + return &types.MsgExecutionPayload{ExecutionPayload: marshaled} + }, + }, + } + + for _, tc := range tcs { + //nolint:tparallel // cannot run parallel because of data race on execution head table + t.Run(tc.name, func(t *testing.T) { + cachedCtx, _ := ctx.CacheContext() + if tc.setup != nil { + cachedCtx = tc.setup(cachedCtx) + } + _, err := keeper.parseAndVerifyProposedPayload(cachedCtx, tc.msg(cachedCtx)) + if tc.expectedErr != "" { + require.ErrorContains(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + }) } +} + +func TestKeeper_setOptimisticPayload(t *testing.T) { + t.Parallel() + _, _, keeper := createKeeper(t, args{ + height: 0, + current: 0, + next: 1, + header: func(height int64, address []byte) cmtproto.Header { + return cmtproto.Header{Height: height, ProposerAddress: address} + }, + }) + + // check existing values + require.Nil(t, keeper.mutablePayload.ID) + require.Zero(t, keeper.mutablePayload.Height) + + // set new values + keeper.setOptimisticPayload(&engine.PayloadID{1}, 1) + require.Equal(t, uint64(1), keeper.mutablePayload.Height) + require.Equal(t, engine.PayloadID{1}, *keeper.mutablePayload.ID) +} + +func TestKeeper_isNextProposer(t *testing.T) { + t.Parallel() height := int64(1) tests := []struct { name string @@ -117,30 +349,7 @@ func TestKeeper_isNextProposer(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - cdc := getCodec(t) - txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) - - cmtAPI := newMockCometAPI(t, tt.args.validatorsFunc) - header := tt.args.header(height, cmtAPI.validatorSet.Validators[tt.args.current].Address) - - nxtAddr, err := k1util.PubKeyToAddress(cmtAPI.validatorSet.Validators[tt.args.next].PubKey) - require.NoError(t, err) - - ctrl := gomock.NewController(t) - mockClient := 
mock.NewMockClient(ctrl) - ak := moduletestutil.NewMockAccountKeeper(ctrl) - esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) - uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - - ctx, storeService := setupCtxStore(t, &header) - - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) - require.NoError(t, err) - keeper.SetCometAPI(cmtAPI) - keeper.SetValidatorAddress(nxtAddr) - populateGenesisHead(ctx, t, keeper) + ctx, cmtAPI, keeper := createKeeper(t, tt.args) got, err := keeper.isNextProposer(ctx, ctx.BlockHeader().ProposerAddress, ctx.BlockHeader().Height) if (err != nil) != tt.wantErr { diff --git a/lib/ethclient/enginemock.go b/lib/ethclient/enginemock.go index e3f26336..fe4251b2 100644 --- a/lib/ethclient/enginemock.go +++ b/lib/ethclient/enginemock.go @@ -108,7 +108,7 @@ func MockGenesisBlock() (*types.Block, error) { fuzzer = NewFuzzer(timestamp) ) - genesisPayload, err := makePayload(fuzzer, height, uint64(timestamp), parentHash, common.Address{}, parentHash, &parentBeaconRoot) + genesisPayload, err := MakePayload(fuzzer, height, uint64(timestamp), parentHash, common.Address{}, parentHash, &parentBeaconRoot) if err != nil { return nil, errors.Wrap(err, "make next payload") } @@ -337,7 +337,7 @@ func (m *engineMock) ForkchoiceUpdatedV3(ctx context.Context, update engine.Fork // If we have payload attributes, make a new payload if attrs != nil { - payload, err := makePayload(m.fuzzer, m.head.NumberU64()+1, + payload, err := MakePayload(m.fuzzer, m.head.NumberU64()+1, attrs.Timestamp, update.HeadBlockHash, attrs.SuggestedFeeRecipient, attrs.Random, attrs.BeaconRoot) if err != nil { return engine.ForkChoiceResponse{}, err @@ -396,8 +396,8 @@ func (*engineMock) GetPayloadV2(context.Context, engine.PayloadID) (*engine.Exec panic("implement me") } -// makePayload returns a new fuzzed payload using head as parent if provided. -func makePayload(fuzzer *fuzz.Fuzzer, height uint64, timestamp uint64, parentHash common.Hash, +// MakePayload returns a new fuzzed payload using head as parent if provided. 
+func MakePayload(fuzzer *fuzz.Fuzzer, height uint64, timestamp uint64, parentHash common.Hash, feeRecipient common.Address, randao common.Hash, beaconRoot *common.Hash) (engine.ExecutableData, error) { // Build a new header var header types.Header From dd832fec717a4122db4eceb19d2a913943e4492d Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:50:26 +0900 Subject: [PATCH 13/29] test(evmengine): add test cases for genesis (#134) increase coverage to 83.3% moved common function to keeper_internal_test.go --- client/x/evmengine/keeper/db_internal_test.go | 28 ---- .../evmengine/keeper/genesis_internal_test.go | 131 ++++++++++++++++++ .../evmengine/keeper/keeper_internal_test.go | 26 ++++ 3 files changed, 157 insertions(+), 28 deletions(-) create mode 100644 client/x/evmengine/keeper/genesis_internal_test.go diff --git a/client/x/evmengine/keeper/db_internal_test.go b/client/x/evmengine/keeper/db_internal_test.go index a34a784d..32d0eeda 100644 --- a/client/x/evmengine/keeper/db_internal_test.go +++ b/client/x/evmengine/keeper/db_internal_test.go @@ -3,39 +3,11 @@ package keeper import ( "testing" - sdk "github.com/cosmos/cosmos-sdk/types" - authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" - - moduletestutil "github.com/piplabs/story/client/x/evmengine/testutil" - "github.com/piplabs/story/lib/ethclient/mock" - - "go.uber.org/mock/gomock" ) -func createTestKeeper(t *testing.T) (sdk.Context, *Keeper) { - t.Helper() - cdc := getCodec(t) - txConfig := authtx.NewTxConfig(cdc, nil) - - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) - - ctrl := gomock.NewController(t) - mockClient := mock.NewMockClient(ctrl) - ak := moduletestutil.NewMockAccountKeeper(ctrl) - esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) - uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - ctx, storeService := setupCtxStore(t, nil) - ctx = ctx.WithExecMode(sdk.ExecModeFinalize) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) - require.NoError(t, err) - - return ctx, keeper -} - func TestKeeper_InsertGenesisHead(t *testing.T) { t.Parallel() diff --git a/client/x/evmengine/keeper/genesis_internal_test.go b/client/x/evmengine/keeper/genesis_internal_test.go new file mode 100644 index 00000000..1dd7ef8e --- /dev/null +++ b/client/x/evmengine/keeper/genesis_internal_test.go @@ -0,0 +1,131 @@ +package keeper + +import ( + "context" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/evmengine/types" +) + +func TestKeeper_InitGenesis(t *testing.T) { + t.Parallel() + dummyExecutionHead := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + validParams := types.NewParams(dummyExecutionHead.Bytes()) + + tcs := []struct { + name string + gs func() *types.GenesisState + setup func(c context.Context, k *Keeper) + postStateCheck func(c context.Context, k *Keeper) + expectedError string + requirePanic bool + }{ + { + name: "pass", + gs: func() *types.GenesisState { + return &types.GenesisState{ + Params: validParams, + } + }, + postStateCheck: func(c context.Context, k *Keeper) { + params, err := k.GetParams(c) + require.NoError(t, err) + require.Equal(t, validParams, params) + }, + }, + { + name: "fail: invalid execution block hash", + 
gs: func() *types.GenesisState { + invalidParams := validParams + invalidParams.ExecutionBlockHash = []byte("invalid") + + return &types.GenesisState{ + Params: invalidParams, + } + }, + expectedError: "invalid execution block hash length", + }, + { + name: "panic: execution head already exists", + setup: func(c context.Context, k *Keeper) { + require.NoError(t, k.InsertGenesisHead(c, dummyExecutionHead.Bytes())) + }, + gs: func() *types.GenesisState { + return &types.GenesisState{ + Params: validParams, + } + }, + expectedError: "insert genesis head: unexpected genesis head id", + requirePanic: true, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ctx, keeper := createTestKeeper(t) + if tc.setup != nil { + tc.setup(ctx, keeper) + } + if tc.requirePanic { + require.PanicsWithError(t, tc.expectedError, func() { + _ = keeper.InitGenesis(ctx, tc.gs()) + }) + } else { + err := keeper.InitGenesis(ctx, tc.gs()) + if tc.expectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedError) + } else { + require.NoError(t, err) + tc.postStateCheck(ctx, keeper) + } + } + }) + } +} + +func TestKeeper_ExportGenesis(t *testing.T) { + t.Parallel() + dummyExecutionHead := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + validParams := types.NewParams(dummyExecutionHead.Bytes()) + + tcs := []struct { + name string + setup func(c context.Context, k *Keeper) + postStateCheck func(c context.Context, k *Keeper) + }{ + { + name: "pass", + setup: func(c context.Context, k *Keeper) { + require.NoError(t, k.SetParams(c, validParams)) + }, + postStateCheck: func(c context.Context, k *Keeper) { + gs := k.ExportGenesis(sdk.UnwrapSDKContext(c)) + require.Equal(t, validParams, gs.Params) + }, + }, + { + name: "pass: default params", + postStateCheck: func(c context.Context, k *Keeper) { + gs := k.ExportGenesis(sdk.UnwrapSDKContext(c)) + require.Equal(t, types.DefaultParams(), gs.Params) + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ctx, keeper := createTestKeeper(t) + if tc.setup != nil { + tc.setup(ctx, keeper) + } + tc.postStateCheck(ctx, keeper) + }) + } +} diff --git a/client/x/evmengine/keeper/keeper_internal_test.go b/client/x/evmengine/keeper/keeper_internal_test.go index 311c7557..ccda9907 100644 --- a/client/x/evmengine/keeper/keeper_internal_test.go +++ b/client/x/evmengine/keeper/keeper_internal_test.go @@ -35,6 +35,32 @@ type args struct { header func(height int64, address []byte) cmtproto.Header } +func createTestKeeper(t *testing.T) (context.Context, *Keeper) { + t.Helper() + + cdc := getCodec(t) + txConfig := authtx.NewTxConfig(cdc, nil) + mockEngine, err := newMockEngineAPI(0) + require.NoError(t, err) + + cmtAPI := newMockCometAPI(t, nil) + header := cmtproto.Header{Height: 1} + + ctrl := gomock.NewController(t) + mockClient := mock.NewMockClient(ctrl) + ak := moduletestutil.NewMockAccountKeeper(ctrl) + esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) + uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + + ctx, storeService := setupCtxStore(t, &header) + + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + require.NoError(t, err) + keeper.SetCometAPI(cmtAPI) + + return ctx, keeper +} + func createKeeper(t *testing.T, args args) (sdk.Context, *mockCometAPI, *Keeper) { t.Helper() From 4d7dbe1c1025ecabb1ee38e10d05d411f35f8728 Mon Sep 17 00:00:00 2001 From: zsystm 
<124245155+zsystm@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:55:35 +0900 Subject: [PATCH 14/29] add test cases for genesis (#135) --- client/x/evmengine/types/genesis_test.go | 57 ++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 client/x/evmengine/types/genesis_test.go diff --git a/client/x/evmengine/types/genesis_test.go b/client/x/evmengine/types/genesis_test.go new file mode 100644 index 00000000..964a9955 --- /dev/null +++ b/client/x/evmengine/types/genesis_test.go @@ -0,0 +1,57 @@ +package types_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/evmengine/types" +) + +func TestNewGenesisState(t *testing.T) { + t.Parallel() + dummyExecutionHead := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + tcs := []struct { + name string + params types.Params + expectedResult *types.GenesisState + }{ + { + name: "not nil params", + params: types.Params{ + ExecutionBlockHash: dummyExecutionHead.Bytes(), + }, + expectedResult: &types.GenesisState{ + Params: types.Params{ + ExecutionBlockHash: dummyExecutionHead.Bytes(), + }, + }, + }, + { + name: "nil execution block hash", + params: types.Params{ + ExecutionBlockHash: nil, + }, + expectedResult: &types.GenesisState{ + Params: types.Params{ + ExecutionBlockHash: nil, + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + result := types.NewGenesisState(tc.params) + require.Equal(t, tc.params, result.Params) + }) + } +} + +func TestDefaultGenesisState(t *testing.T) { + t.Parallel() + result := types.DefaultGenesisState() + require.Equal(t, types.DefaultParams(), result.Params) +} From 70850ca69dd9d6195b68319d5f79d445e0334417 Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:00:00 +0900 Subject: [PATCH 15/29] test(evmengine): add test cases for params (#136) increased coverage to 77.3% --- .../evmengine/keeper/params_internal_test.go | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 client/x/evmengine/keeper/params_internal_test.go diff --git a/client/x/evmengine/keeper/params_internal_test.go b/client/x/evmengine/keeper/params_internal_test.go new file mode 100644 index 00000000..e5b40a9d --- /dev/null +++ b/client/x/evmengine/keeper/params_internal_test.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/evmengine/types" +) + +func TestKeeper_ExecutionBlockHash(t *testing.T) { + t.Parallel() + ctx, keeper := createTestKeeper(t) + + // check existing execution block hash + execHash, err := keeper.ExecutionBlockHash(ctx) + require.NoError(t, err) + require.Nil(t, execHash, "execution block hash should be nil because it is not set yet") + + // set execution block hash + dummyHash := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + require.NoError(t, keeper.SetParams(ctx, types.Params{ExecutionBlockHash: dummyHash.Bytes()})) + + // check execution block hash whether it is set correctly + execHash, err = keeper.ExecutionBlockHash(ctx) + require.NoError(t, err) + require.Equal(t, dummyHash.Bytes(), execHash, "execution block hash should be equal to the dummy hash") +} + +func TestKeeper_GetSetParams(t *testing.T) { + t.Parallel() + ctx, keeper := createTestKeeper(t) + + // check 
existing params + params, err := keeper.GetParams(ctx) + require.NoError(t, err) + require.Equal(t, types.DefaultParams(), params, "params should be equal to the default params") + + // set execution block hash + dummyHash := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + require.NoError(t, keeper.SetParams(ctx, types.Params{ExecutionBlockHash: dummyHash.Bytes()})) + + // check params whether it is set correctly + params, err = keeper.GetParams(ctx) + require.NoError(t, err) + require.Equal(t, types.Params{ExecutionBlockHash: dummyHash.Bytes()}, params) +} From 1e10970b3cec3505119109c3a725e3016c51977a Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:05:02 +0900 Subject: [PATCH 16/29] add test cases for params (#137) increased coverage to 100% --- client/x/evmengine/types/params_test.go | 88 +++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 client/x/evmengine/types/params_test.go diff --git a/client/x/evmengine/types/params_test.go b/client/x/evmengine/types/params_test.go new file mode 100644 index 00000000..92b17077 --- /dev/null +++ b/client/x/evmengine/types/params_test.go @@ -0,0 +1,88 @@ +package types_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/piplabs/story/client/x/evmengine/types" +) + +func TestNewParams(t *testing.T) { + t.Parallel() + + dummyHash := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + tcs := []struct { + name string + executionBlockHash []byte + expectedResult types.Params + }{ + { + name: "non-nil execution block hash", + executionBlockHash: dummyHash.Bytes(), + expectedResult: types.Params{ + ExecutionBlockHash: dummyHash.Bytes(), + }, + }, + { + name: "nil execution block hash", + executionBlockHash: nil, + expectedResult: types.Params{ + ExecutionBlockHash: nil, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + result := types.NewParams(tc.executionBlockHash) + require.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestDefaultParams(t *testing.T) { + t.Parallel() + + result := types.DefaultParams() + require.Equal(t, types.Params{ + ExecutionBlockHash: nil, + }, result) +} + +func TestValidateExecutionBlockHash(t *testing.T) { + t.Parallel() + + dummyHash := common.HexToHash("0x047e24c3455107d87c68dffa307b3b7fa1877f3e9d7f30c7ee359f2eff3a75d9") + tcs := []struct { + name string + executionBlockHash []byte + expectedError string + }{ + { + name: "pass: valid execution block hash", + executionBlockHash: dummyHash.Bytes(), + }, + { + name: "fail: invalid execution block hash", + executionBlockHash: []byte("invalid"), + expectedError: "invalid execution block hash length", + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + err := types.ValidateExecutionBlockHash(tc.executionBlockHash) + if tc.expectedError == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, tc.expectedError) + } + }) + } +} From b8b897db746381d25dfa30ce547d4bd1e99d14c9 Mon Sep 17 00:00:00 2001 From: Haodi <82733821@qq.com> Date: Tue, 24 Sep 2024 14:36:04 +0800 Subject: [PATCH 17/29] feat(script): auto add binary version to s3 file (#142) * add version txt * fixed task name * fixed url * fixed url * opti title * finish test * fixed platform * fixed pre-commit --- .github/workflows/ci-s3.yml | 25 +++++++++++++++++++++++++ 1 file 
changed, 25 insertions(+) diff --git a/.github/workflows/ci-s3.yml b/.github/workflows/ci-s3.yml index d61e2e49..de3edc14 100644 --- a/.github/workflows/ci-s3.yml +++ b/.github/workflows/ci-s3.yml @@ -61,6 +61,7 @@ jobs: echo "Version extracted: $VERSION" echo "VERSION=$VERSION" >> $GITHUB_ENV + echo "VERSION_META=$VERSION_META" >> $GITHUB_ENV - name: Build the story binary run: | @@ -87,6 +88,7 @@ jobs: COMMIT_HASH=$(git rev-parse --short HEAD) FOLDER_NAME="story-${{ matrix.platform }}-${VERSION}-${COMMIT_HASH}" ARCHIVE_NAME="${FOLDER_NAME}.tar.gz" + PUBLIC_DOWNLOAD_URL="https://$S3_BUCKET.s3.us-west-1.amazonaws.com/$BIN_NAME-public/$ARCHIVE_NAME" BIN_NAME_WITH_PATH=./client/$BIN_NAME if [ "$GOOS" = "windows" ]; then @@ -115,8 +117,31 @@ jobs: aws s3 cp s3://$S3_BUCKET/$BIN_NAME/manifest.txt manifest.txt --quiet || touch manifest.txt echo "$TIMESTAMP" >> manifest.txt aws s3 cp manifest.txt s3://$S3_BUCKET/$BIN_NAME/manifest.txt --quiet + + # Update version file + aws s3 cp s3://$S3_BUCKET/$BIN_NAME-public/version.txt version.txt --quiet || printf "File Name\t\t\tVerison\t\t\t\tCommit Hash\t\tTimestamp\n" > version.txt + + if [ "${VERSION_META}" != "stable" ]; then + printf "$VERSION-$COMMIT_HASH\t\t$VERSION\t\t\t$COMMIT_HASH\t\t\t$TIMESTAMP\n" >> version.txt + else + printf "$VERSION-$COMMIT_HASH\t\t\t$VERSION\t\t\t\t$COMMIT_HASH\t\t\t$TIMESTAMP\n" >> version.txt + fi + + aws s3 cp version.txt s3://$S3_BUCKET/$BIN_NAME-public/version.txt --quiet fi + echo "COMMIT_HASH=$COMMIT_HASH" >> $GITHUB_ENV + echo "PUBLIC_DOWNLOAD_URL=$PUBLIC_DOWNLOAD_URL" >> $GITHUB_ENV + + - name: Add binary version back to PR + if: matrix.platform == 'linux-amd64' + uses: mshick/add-pr-comment@v2 + with: + message: | + ### Binary uploaded successfully 🎉 + 📦 **Version Name:** ${{ env.VERSION }}-${{ env.COMMIT_HASH }} + 📦 **Download Source:** [AWS S3](${{ env.PUBLIC_DOWNLOAD_URL }}) + cleanup: runs-on: ubuntu-latest needs: build_and_push From 0d090ffe2b776b94e703e790ca5ea54de47e0e4d Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 1 Oct 2024 09:22:55 +0900 Subject: [PATCH 18/29] fix(evmengine): nil panic with optimistic build enabled (#128) cmtAPI is lazily set, so during replyBlocks it is nil. --- client/x/evmengine/keeper/keeper.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/client/x/evmengine/keeper/keeper.go b/client/x/evmengine/keeper/keeper.go index f3b3086c..41e265a3 100644 --- a/client/x/evmengine/keeper/keeper.go +++ b/client/x/evmengine/keeper/keeper.go @@ -172,6 +172,12 @@ func (k *Keeper) parseAndVerifyProposedPayload(ctx context.Context, msg *types.M // // Note that the validator set can change, so this is an optimistic check. func (k *Keeper) isNextProposer(ctx context.Context, currentProposer []byte, currentHeight int64) (bool, error) { + // PostFinalize can be called during block replay (performed in newCometNode), + // but cmtAPI is set only after newCometNode completes (see app.SetCometAPI), so a nil check is necessary. 
+ if k.cmtAPI == nil { + return false, nil + } + valset, ok, err := k.cmtAPI.Validators(ctx, currentHeight) if err != nil { return false, err From 42fc2822326b85d65679ee2f232bd848e68a3734 Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:53:49 +0900 Subject: [PATCH 19/29] test(evmengine/keeper): add test cases for msg server (#100) changes of mockEngineAPI - forceInvalidNewPayloadV3 and forceInvalidForkchoiceUpdatedV3 are added to simulate failed apis changes of engineMock - Add storeKey to make engineMock's methods dependent on sdk.Context for better testability NewBlock - Because of above changes, we need to rlp encode and decode block data. But if we create a block with nil withdrawals and nil withdrawalHash, rlp doesn't work well (couldn't figure out the root cause) - To avoid rlp error, pass non-nil withdrawals so withdrawalHash can be set as non-nil emptyHash value. --- .../x/evmengine/keeper/abci_internal_test.go | 43 +++- .../evmengine/keeper/keeper_internal_test.go | 12 +- .../keeper/msg_server_internal_test.go | 237 ++++++++++++++---- .../keeper/proposal_server_internal_test.go | 8 +- .../keeper/upgrades_internal_test.go | 6 +- client/x/evmstaking/keeper/keeper_test.go | 2 +- lib/ethclient/enginemock.go | 116 +++++++-- 7 files changed, 333 insertions(+), 91 deletions(-) diff --git a/client/x/evmengine/keeper/abci_internal_test.go b/client/x/evmengine/keeper/abci_internal_test.go index 0fd30ee5..21e00c26 100644 --- a/client/x/evmengine/keeper/abci_internal_test.go +++ b/client/x/evmengine/keeper/abci_internal_test.go @@ -124,7 +124,7 @@ func TestKeeper_PrepareProposal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx, storeService := setupCtxStore(t, nil) + ctx, storeKey, storeService := setupCtxStore(t, nil) cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) @@ -138,7 +138,7 @@ func TestKeeper_PrepareProposal(t *testing.T) { } var err error - tt.mockEngine.EngineClient, err = ethclient.NewEngineMock() + tt.mockEngine.EngineClient, err = ethclient.NewEngineMock(storeKey) require.NoError(t, err) k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk) @@ -160,11 +160,11 @@ func TestKeeper_PrepareProposal(t *testing.T) { t.Run("TestBuildNonOptimistic", func(t *testing.T) { t.Parallel() // setup dependencies - ctx, storeService := setupCtxStore(t, nil) + ctx, storeKey, storeService := setupCtxStore(t, nil) cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) + mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) ctrl := gomock.NewController(t) @@ -250,14 +250,14 @@ func assertExecutablePayload(t *testing.T, msg sdk.Msg, ts int64, blockHash comm // require.Equal(t, evmLog.Address, zeroAddr.Bytes()) } -func ctxWithAppHash(t *testing.T, appHash common.Hash) context.Context { +func ctxWithAppHash(t *testing.T, appHash common.Hash) (context.Context, *storetypes.KVStoreKey) { t.Helper() - ctx, _ := setupCtxStore(t, &cmtproto.Header{AppHash: appHash.Bytes()}) + ctx, storeKey, _ := setupCtxStore(t, &cmtproto.Header{AppHash: appHash.Bytes()}) - return ctx + return ctx, storeKey } -func setupCtxStore(t *testing.T, header *cmtproto.Header) (sdk.Context, store.KVStoreService) { +func setupCtxStore(t *testing.T, header *cmtproto.Header) (sdk.Context, *storetypes.KVStoreKey, store.KVStoreService) { t.Helper() key := storetypes.NewKVStoreKey("test") storeService := 
runtime.NewKVStoreService(key) @@ -267,7 +267,7 @@ func setupCtxStore(t *testing.T, header *cmtproto.Header) (sdk.Context, store.KV } ctx := testCtx.Ctx.WithBlockHeader(*header) - return ctx, storeService + return ctx, key, storeService } func getCodec(t *testing.T) codec.Codec { @@ -304,11 +304,15 @@ type mockEngineAPI struct { headerByTypeFunc func(context.Context, ethclient.HeadType) (*types.Header, error) forkchoiceUpdatedV3Func func(context.Context, eengine.ForkchoiceStateV1, *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) newPayloadV3Func func(context.Context, eengine.ExecutableData, []common.Hash, *common.Hash) (eengine.PayloadStatusV1, error) + // forceInvalidNewPayloadV3 forces the NewPayloadV3 returns an invalid status. + forceInvalidNewPayloadV3 bool + // forceInvalidForkchoiceUpdatedV3 forces the ForkchoiceUpdatedV3 returns an invalid status. + forceInvalidForkchoiceUpdatedV3 bool } // newMockEngineAPI returns a new mock engine API with a fuzzer and a mock engine client. -func newMockEngineAPI(syncings int) (mockEngineAPI, error) { - me, err := ethclient.NewEngineMock() +func newMockEngineAPI(key *storetypes.KVStoreKey, syncings int) (mockEngineAPI, error) { + me, err := ethclient.NewEngineMock(key) if err != nil { return mockEngineAPI{}, err } @@ -395,6 +399,13 @@ func (m *mockEngineAPI) NewPayloadV2(ctx context.Context, params eengine.Executa //nolint:nonamedreturns // Required for defer func (m *mockEngineAPI) NewPayloadV3(ctx context.Context, params eengine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (resp eengine.PayloadStatusV1, err error) { + if m.forceInvalidNewPayloadV3 { + m.forceInvalidNewPayloadV3 = false + return eengine.PayloadStatusV1{ + Status: eengine.INVALID, + }, nil + } + if status, ok := m.maybeSync(); ok { defer func() { resp.Status = status.Status @@ -410,6 +421,14 @@ func (m *mockEngineAPI) NewPayloadV3(ctx context.Context, params eengine.Executa //nolint:nonamedreturns // Required for defer func (m *mockEngineAPI) ForkchoiceUpdatedV3(ctx context.Context, update eengine.ForkchoiceStateV1, payloadAttributes *eengine.PayloadAttributes) (resp eengine.ForkChoiceResponse, err error) { + if m.forceInvalidForkchoiceUpdatedV3 { + m.forceInvalidForkchoiceUpdatedV3 = false + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.INVALID, + }, + }, nil + } if status, ok := m.maybeSync(); ok { defer func() { resp.PayloadStatus.Status = status.Status @@ -448,7 +467,7 @@ func (m *mockEngineAPI) nextBlock( header.ParentBeaconRoot = beaconRoot // Convert header to block - block := types.NewBlock(&header, nil, nil, trie.NewStackTrie(nil)) + block := types.NewBlock(&header, &types.Body{Withdrawals: make([]*types.Withdrawal, 0)}, nil, trie.NewStackTrie(nil)) // Convert block to payload env := eengine.BlockToExecutableData(block, big.NewInt(0), nil) diff --git a/client/x/evmengine/keeper/keeper_internal_test.go b/client/x/evmengine/keeper/keeper_internal_test.go index ccda9907..40fd34a9 100644 --- a/client/x/evmengine/keeper/keeper_internal_test.go +++ b/client/x/evmengine/keeper/keeper_internal_test.go @@ -40,8 +40,6 @@ func createTestKeeper(t *testing.T) (context.Context, *Keeper) { cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) cmtAPI := newMockCometAPI(t, nil) header := cmtproto.Header{Height: 1} @@ -52,7 +50,9 @@ func createTestKeeper(t *testing.T) (context.Context, *Keeper) { esk := 
moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - ctx, storeService := setupCtxStore(t, &header) + ctx, storeKey, storeService := setupCtxStore(t, &header) + mockEngine, err := newMockEngineAPI(storeKey, 0) + require.NoError(t, err) keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) require.NoError(t, err) @@ -66,8 +66,6 @@ func createKeeper(t *testing.T, args args) (sdk.Context, *mockCometAPI, *Keeper) cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) cmtAPI := newMockCometAPI(t, args.validatorsFunc) header := args.header(args.height, cmtAPI.validatorSet.Validators[args.current].Address) @@ -81,7 +79,9 @@ func createKeeper(t *testing.T, args args) (sdk.Context, *mockCometAPI, *Keeper) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - ctx, storeService := setupCtxStore(t, &header) + ctx, storeKey, storeService := setupCtxStore(t, &header) + mockEngine, err := newMockEngineAPI(storeKey, 0) + require.NoError(t, err) keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) diff --git a/client/x/evmengine/keeper/msg_server_internal_test.go b/client/x/evmengine/keeper/msg_server_internal_test.go index ab0206f4..e2378bca 100644 --- a/client/x/evmengine/keeper/msg_server_internal_test.go +++ b/client/x/evmengine/keeper/msg_server_internal_test.go @@ -18,6 +18,7 @@ import ( moduletestutil "github.com/piplabs/story/client/x/evmengine/testutil" "github.com/piplabs/story/client/x/evmengine/types" + "github.com/piplabs/story/contracts/bindings" "github.com/piplabs/story/lib/errors" "github.com/piplabs/story/lib/ethclient" "github.com/piplabs/story/lib/ethclient/mock" @@ -34,19 +35,12 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(2) - require.NoError(t, err) - ctrl := gomock.NewController(t) mockClient := mock.NewMockClient(ctrl) ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - // Expected call for PeekEligibleWithdrawals - esk.EXPECT().DequeueEligibleWithdrawals(gomock.Any()).Return(nil, nil).AnyTimes() - esk.EXPECT().ProcessStakingEvents(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - cmtAPI := newMockCometAPI(t, nil) // set the header and proposer so we have the correct next proposer header := cmtproto.Header{Height: 1, AppHash: tutil.RandomHash().Bytes()} @@ -54,10 +48,11 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { nxtAddr, err := k1util.PubKeyToAddress(cmtAPI.validatorSet.Validators[1].PubKey) require.NoError(t, err) - ctx, storeService := setupCtxStore(t, &header) + ctx, storeKey, storeService := setupCtxStore(t, &header) ctx = ctx.WithExecMode(sdk.ExecModeFinalize) - evmLogProc := mockLogProvider{deliverErr: errors.New("test error")} + mockEngine, err := newMockEngineAPI(storeKey, 2) + require.NoError(t, err) keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) @@ -65,54 +60,210 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { populateGenesisHead(ctx, t, keeper) msgSrv := NewMsgServerImpl(keeper) - - var payloadData []byte - var payloadID engine.PayloadID - var 
latestHeight uint64 - var block *etypes.Block - newPayload := func(ctx context.Context) { + createValidPayload := func(c context.Context) (*etypes.Block, engine.PayloadID, []byte) { // get latest block to build on top - latestBlock, err := mockEngine.HeaderByType(ctx, ethclient.HeadLatest) + latestBlock, err := mockEngine.HeaderByType(c, ethclient.HeadLatest) require.NoError(t, err) latestHeight := latestBlock.Number.Uint64() - sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx := sdk.UnwrapSDKContext(c) appHash := common.BytesToHash(sdkCtx.BlockHeader().AppHash) - b, execPayload := mockEngine.nextBlock(t, latestHeight+1, uint64(time.Now().Unix()), latestBlock.Hash(), keeper.validatorAddr, &appHash) - block = b - - payloadID, err = ethclient.MockPayloadID(execPayload, &appHash) + block, execPayload := mockEngine.nextBlock(t, latestHeight+1, uint64(time.Now().Unix()), latestBlock.Hash(), keeper.validatorAddr, &appHash) + payloadID, err := ethclient.MockPayloadID(execPayload, &appHash) require.NoError(t, err) // Create execution payload message - payloadData, err = json.Marshal(execPayload) + payloadData, err := json.Marshal(execPayload) require.NoError(t, err) - } - assertExecutionPayload := func(ctx context.Context) { - events, err := evmLogProc.Prepare(ctx, block.Hash()) + return block, payloadID, payloadData + } + createRandomEvents := func(c context.Context, blkHash common.Hash) []*types.EVMEvent { + events, err := evmLogProc.Prepare(c, blkHash) require.NoError(t, err) - resp, err := msgSrv.ExecutionPayload(ctx, &types.MsgExecutionPayload{ - Authority: authtypes.NewModuleAddress(types.ModuleName).String(), - ExecutionPayload: payloadData, - PrevPayloadEvents: events, - }) - require.NoError(t, err) - require.NotNil(t, resp) + return events + } - gotPayload, err := mockEngine.GetPayloadV3(ctx, payloadID) - require.NoError(t, err) - // make sure height is increasing in engine, blocks being built - require.Equal(t, gotPayload.ExecutionPayload.Number, latestHeight+1) - require.Equal(t, gotPayload.ExecutionPayload.BlockHash, block.Hash()) - require.Equal(t, gotPayload.ExecutionPayload.FeeRecipient, keeper.validatorAddr) - require.Empty(t, gotPayload.ExecutionPayload.Withdrawals) + tcs := []struct { + name string + setup func(c context.Context) sdk.Context + createPayload func(c context.Context) (*etypes.Block, engine.PayloadID, []byte) + createPrevPayloadEvents func(c context.Context, blkHash common.Hash) []*types.EVMEvent + expectedError string + postCheck func(c context.Context, block *etypes.Block, payloadID engine.PayloadID) + }{ + { + name: "pass: valid payload", + setup: func(c context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(c).Return(nil, nil) + esk.EXPECT().ProcessStakingEvents(c, gomock.Any(), gomock.Any()).Return(nil) + + return sdk.UnwrapSDKContext(c) + }, + createPayload: createValidPayload, + createPrevPayloadEvents: createRandomEvents, + postCheck: func(c context.Context, block *etypes.Block, payloadID engine.PayloadID) { + gotPayload, err := mockEngine.GetPayloadV3(c, payloadID) + require.NoError(t, err) + require.Equal(t, gotPayload.ExecutionPayload.Number, block.Header().Number.Uint64()) + require.Equal(t, gotPayload.ExecutionPayload.BlockHash, block.Hash()) + require.Equal(t, gotPayload.ExecutionPayload.FeeRecipient, keeper.validatorAddr) + require.Empty(t, gotPayload.ExecutionPayload.Withdrawals) + }, + }, + { + name: "fail: sdk exec mode is not finalize", + setup: func(c context.Context) sdk.Context { + return 
sdk.UnwrapSDKContext(c).WithExecMode(sdk.ExecModeCheck) + }, + expectedError: "only allowed in finalize mode", + }, + { + name: "fail: no execution head", + setup: func(c context.Context) sdk.Context { + head, err := keeper.headTable.Get(c, executionHeadID) + require.NoError(t, err) + require.NoError(t, keeper.headTable.Delete(c, head)) + + return sdk.UnwrapSDKContext(c) + }, + createPayload: createValidPayload, + expectedError: "not found", + }, + { + name: "fail: invalid payload - wrong payload number", + createPayload: func(ctx context.Context) (*etypes.Block, engine.PayloadID, []byte) { + latestBlock, err := mockEngine.HeaderByType(ctx, ethclient.HeadLatest) + require.NoError(t, err) + latestHeight := latestBlock.Number.Uint64() + wrongNextHeight := latestHeight + 2 + + sdkCtx := sdk.UnwrapSDKContext(ctx) + appHash := common.BytesToHash(sdkCtx.BlockHeader().AppHash) + + block, execPayload := mockEngine.nextBlock(t, wrongNextHeight, uint64(time.Now().Unix()), latestBlock.Hash(), keeper.validatorAddr, &appHash) + payloadID, err := ethclient.MockPayloadID(execPayload, &appHash) + require.NoError(t, err) + + // Create execution payload message + payloadData, err := json.Marshal(execPayload) + require.NoError(t, err) + + return block, payloadID, payloadData + }, + createPrevPayloadEvents: createRandomEvents, + expectedError: "invalid proposed payload number", + }, + { + name: "fail: DequeueEligibleWithdrawals error", + setup: func(ctx context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(ctx).Return(nil, errors.New("failed to dequeue")) + + return sdk.UnwrapSDKContext(ctx) + }, + createPayload: createValidPayload, + expectedError: "error on withdrawals dequeue", + }, + { + name: "fail: NewPayloadV3 returns status invalid", + setup: func(ctx context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(ctx).Return(nil, nil) + mockEngine.forceInvalidNewPayloadV3 = true + + return sdk.UnwrapSDKContext(ctx) + }, + createPayload: createValidPayload, + createPrevPayloadEvents: createRandomEvents, + expectedError: "payload invalid", + }, + { + name: "fail: ForkchoiceUpdatedV3 returns status invalid", + setup: func(ctx context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(ctx).Return(nil, nil) + mockEngine.forceInvalidForkchoiceUpdatedV3 = true + + return sdk.UnwrapSDKContext(ctx) + }, + createPayload: createValidPayload, + createPrevPayloadEvents: createRandomEvents, + expectedError: "payload invalid", + }, + { + name: "fail: ProcessStakingEvents error", + setup: func(ctx context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(ctx).Return(nil, nil) + esk.EXPECT().ProcessStakingEvents(ctx, gomock.Any(), gomock.Any()).Return(errors.New("failed to process staking events")) + + return sdk.UnwrapSDKContext(ctx) + }, + createPayload: createValidPayload, + createPrevPayloadEvents: createRandomEvents, + expectedError: "deliver staking-related event logs", + }, + { + name: "fail: ProcessUpgradeEvents error", + setup: func(ctx context.Context) sdk.Context { + esk.EXPECT().DequeueEligibleWithdrawals(ctx).Return(nil, nil) + esk.EXPECT().ProcessStakingEvents(ctx, gomock.Any(), gomock.Any()).Return(nil) + + return sdk.UnwrapSDKContext(ctx) + }, + createPayload: createValidPayload, + createPrevPayloadEvents: func(_ context.Context, _ common.Hash) []*types.EVMEvent { + // crate invalid upgrade event to trigger ProcessUpgradeEvents failure + upgradeAbi, err := bindings.UpgradeEntrypointMetaData.GetAbi() + require.NoError(t, err, "failed to load 
ABI") + data, err := upgradeAbi.Events["SoftwareUpgrade"].Inputs.NonIndexed().Pack("test-upgrade", int64(0), "test-info") + require.NoError(t, err) + + return []*types.EVMEvent{{ + Address: nil, // nil address + Topics: [][]byte{types.SoftwareUpgradeEvent.ID.Bytes()}, + Data: data, + }} + }, + expectedError: "deliver upgrade-related event logs", + }, } - newPayload(ctx) - assertExecutionPayload(ctx) + for _, tc := range tcs { + //nolint:tparallel // currently, we can't run the tests in parallel due to the shared mockEngine. don't know how to fix it yet, just disable parallel for now. + t.Run(tc.name, func(t *testing.T) { + // t.Parallel() + var payloadData []byte + var payloadID engine.PayloadID + var block *etypes.Block + var events []*types.EVMEvent + + cachedCtx, _ := ctx.CacheContext() + if tc.setup != nil { + cachedCtx = tc.setup(cachedCtx) + } + if tc.createPayload != nil { + block, payloadID, payloadData = tc.createPayload(cachedCtx) + } + if tc.createPrevPayloadEvents != nil { + events = tc.createPrevPayloadEvents(cachedCtx, block.Hash()) + } + + resp, err := msgSrv.ExecutionPayload(cachedCtx, &types.MsgExecutionPayload{ + Authority: authtypes.NewModuleAddress(types.ModuleName).String(), + ExecutionPayload: payloadData, + PrevPayloadEvents: events, + }) + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + } else { + require.NoError(t, err) + require.NotNil(t, resp) + if tc.postCheck != nil { + tc.postCheck(cachedCtx, block, payloadID) + } + } + }) + } // now lets run optimistic flow // ctx = ctx.WithBlockTime(ctx.BlockTime().Add(time.Second)) @@ -236,9 +387,9 @@ func Test_pushPayload(t *testing.T) { t.Parallel() appHash := tutil.RandomHash() - ctx := ctxWithAppHash(t, appHash) + ctx, storeKey := ctxWithAppHash(t, appHash) - mockEngine, err := newMockEngineAPI(0) + mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) mockEngine.newPayloadV3Func = tt.args.newPayloadV3Func diff --git a/client/x/evmengine/keeper/proposal_server_internal_test.go b/client/x/evmengine/keeper/proposal_server_internal_test.go index cf093770..96c9d5ec 100644 --- a/client/x/evmengine/keeper/proposal_server_internal_test.go +++ b/client/x/evmengine/keeper/proposal_server_internal_test.go @@ -29,9 +29,6 @@ func Test_proposalServer_ExecutionPayload(t *testing.T) { cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) - ctrl := gomock.NewController(t) mockClient := mock.NewMockClient(ctrl) ak := moduletestutil.NewMockAccountKeeper(ctrl) @@ -40,9 +37,10 @@ func Test_proposalServer_ExecutionPayload(t *testing.T) { esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil).AnyTimes() - sdkCtx, storeService := setupCtxStore(t, &cmtproto.Header{AppHash: tutil.RandomHash().Bytes()}) + sdkCtx, storeKey, storeService := setupCtxStore(t, &cmtproto.Header{AppHash: tutil.RandomHash().Bytes()}) sdkCtx = sdkCtx.WithExecMode(sdk.ExecModeFinalize) - + mockEngine, err := newMockEngineAPI(storeKey, 0) + require.NoError(t, err) keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) require.NoError(t, err) populateGenesisHead(sdkCtx, t, keeper) diff --git a/client/x/evmengine/keeper/upgrades_internal_test.go b/client/x/evmengine/keeper/upgrades_internal_test.go index 9f7a7b6a..722c69a1 100644 --- a/client/x/evmengine/keeper/upgrades_internal_test.go +++ b/client/x/evmengine/keeper/upgrades_internal_test.go @@ -254,8 +254,6 @@ func setupTestEnvironment(t *testing.T) 
(*Keeper, sdk.Context, *gomock.Controlle t.Helper() cdc := getCodec(t) txConfig := authtx.NewTxConfig(cdc, nil) - mockEngine, err := newMockEngineAPI(0) - require.NoError(t, err) cmtAPI := newMockCometAPI(t, nil) header := cmtproto.Header{Height: 1, AppHash: tutil.RandomHash().Bytes(), ProposerAddress: cmtAPI.validatorSet.Validators[0].Address} @@ -265,7 +263,9 @@ func setupTestEnvironment(t *testing.T) (*Keeper, sdk.Context, *gomock.Controlle esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - ctx, storeService := setupCtxStore(t, &header) + ctx, storeKey, storeService := setupCtxStore(t, &header) + mockEngine, err := newMockEngineAPI(storeKey, 0) + require.NoError(t, err) keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) require.NoError(t, err) diff --git a/client/x/evmstaking/keeper/keeper_test.go b/client/x/evmstaking/keeper/keeper_test.go index aee98e5b..cfbefe7b 100644 --- a/client/x/evmstaking/keeper/keeper_test.go +++ b/client/x/evmstaking/keeper/keeper_test.go @@ -127,7 +127,7 @@ func (s *TestSuite) SetupTest() { s.Require().NoError(s.StakingKeeper.SetParams(s.Ctx, stypes.DefaultParams())) // emvstaking keeper - ethCl, err := ethclient.NewEngineMock() + ethCl, err := ethclient.NewEngineMock(evmstakingKey) s.Require().NoError(err) evmstakingKeeper := keeper.NewKeeper( marshaler, diff --git a/lib/ethclient/enginemock.go b/lib/ethclient/enginemock.go index fe4251b2..960a961d 100644 --- a/lib/ethclient/enginemock.go +++ b/lib/ethclient/enginemock.go @@ -1,6 +1,7 @@ package ethclient import ( + "bytes" "context" "crypto/sha256" "math/big" @@ -9,7 +10,10 @@ import ( "testing" "time" + storetypes "cosmossdk.io/store/types" + "github.com/cometbft/cometbft/crypto" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -17,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" fuzz "github.com/google/gofuzz" @@ -26,6 +31,11 @@ import ( "github.com/piplabs/story/lib/log" ) +const ( + // headKey is the key to store the head block. + headKey = "head" +) + type payloadArgs struct { params engine.ExecutableData beaconRoot *common.Hash @@ -42,8 +52,15 @@ type engineMock struct { fuzzer *fuzz.Fuzzer randomErrs float64 - mu sync.Mutex - head *types.Block + mu sync.Mutex + // storeKey is added to make engineMock dependent on sdk.Context for better testability. + // By using storeKey, engineMock's methods can interact with the sdk.Context's store, + // allowing for independent tests that do not interfere with each other’s store state. + storeKey *storetypes.KVStoreKey + // headKey is the key to store the head block. + headKey []byte + genesisBlock *types.Block + // consider the following maps also dependent on sdk.Context if needed. pendingLogs map[common.Address][]types.Log logs map[common.Hash][]types.Log payloads map[engine.PayloadID]payloadArgs @@ -122,18 +139,20 @@ func MockGenesisBlock() (*types.Block, error) { // NewEngineMock returns a new mock engine API client. // Note only some methods are implemented, it will panic if you call an unimplemented method. 
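The constructor and the getHeadBlock/setHeadBlock helpers that follow move the mock's head out of a struct field and into the KVStore identified by the caller's store key. As the struct comment above notes, this ties head state to the sdk.Context, so tests that branch the context get isolated chains; it is also why every newMockEngineAPI and NewEngineMock call in the test diffs above now threads a store key through. A minimal sketch of that usage, not part of the patch, written as if it sat next to the evmengine keeper tests so it can reuse their setupCtxStore helper (its signature is assumed from its other uses in this series; imports elided):

func TestEngineMockHeadIsolation(t *testing.T) {
	t.Parallel()

	// setupCtxStore is the keeper package's internal test helper; it returns an
	// sdk.Context backed by the same store key the mock is given.
	ctx, storeKey, _ := setupCtxStore(t, &cmtproto.Header{Height: 1})

	mockEngine, err := ethclient.NewEngineMock(storeKey)
	require.NoError(t, err)

	// Branching the context also branches the mock's head: anything written via
	// cachedCtx stays in the cache until writeFn() is called.
	cachedCtx, writeFn := ctx.CacheContext()

	_, err = mockEngine.BlockNumber(cachedCtx) // first read lazily persists the genesis head into the cache
	require.NoError(t, err)

	_ = writeFn // call writeFn() only if this branch's head should become visible to ctx
}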
-func NewEngineMock(opts ...func(mock *engineMock)) (EngineClient, error) { +func NewEngineMock(key *storetypes.KVStoreKey, opts ...func(mock *engineMock)) (EngineClient, error) { genesisBlock, err := MockGenesisBlock() if err != nil { return nil, err } m := &engineMock{ - fuzzer: NewFuzzer(int64(genesisBlock.Time())), - head: genesisBlock, - pendingLogs: make(map[common.Address][]types.Log), - payloads: make(map[engine.PayloadID]payloadArgs), - logs: make(map[common.Hash][]types.Log), + fuzzer: NewFuzzer(int64(genesisBlock.Time())), + storeKey: key, + headKey: []byte(headKey), + genesisBlock: genesisBlock, + pendingLogs: make(map[common.Address][]types.Log), + payloads: make(map[engine.PayloadID]payloadArgs), + logs: make(map[common.Hash][]types.Log), } for _, opt := range opts { opt(m) @@ -154,6 +173,39 @@ func (m *engineMock) maybeErr(ctx context.Context) error { return nil } +// getHeadBlock returns the head block from the store. +func (m *engineMock) getHeadBlock(ctx context.Context) (*types.Block, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + headBz := sdkCtx.KVStore(m.storeKey).Get(m.headKey) + if headBz == nil { + // Set genesis block as head + if err := m.setHeadBlock(ctx, m.genesisBlock); err != nil { + return nil, err + } + + return m.genesisBlock, nil + } + var headBlock types.Block + if err := rlp.DecodeBytes(headBz, &headBlock); err != nil { + return nil, errors.Wrap(err, "decode head") + } + + return &headBlock, nil +} + +// setHeadBlock sets the head block in the store. +func (m *engineMock) setHeadBlock(ctx context.Context, head *types.Block) error { + buf := new(bytes.Buffer) + if err := head.EncodeRLP(buf); err != nil { + return errors.Wrap(err, "encode head") + } + headBz := buf.Bytes() + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.KVStore(m.storeKey).Set(m.headKey, headBz) + + return nil +} + func (m *engineMock) FilterLogs(_ context.Context, q ethereum.FilterQuery) ([]types.Log, error) { m.mu.Lock() defer m.mu.Unlock() @@ -195,7 +247,12 @@ func (m *engineMock) BlockNumber(ctx context.Context) (uint64, error) { m.mu.Lock() defer m.mu.Unlock() - return m.head.NumberU64(), nil + headBlock, err := m.getHeadBlock(ctx) + if err != nil { + return 0, err + } + + return headBlock.NumberU64(), nil } func (m *engineMock) HeaderByNumber(ctx context.Context, height *big.Int) (*types.Header, error) { @@ -228,11 +285,15 @@ func (m *engineMock) HeaderByHash(ctx context.Context, hash common.Hash) (*types m.mu.Lock() defer m.mu.Unlock() - if hash != m.head.Hash() { + head, err := m.getHeadBlock(ctx) + if err != nil { + return nil, err + } + if hash != head.Hash() { return nil, errors.New("only head hash supported") // Only support latest block } - return m.head.Header(), nil + return head.Header(), nil } func (m *engineMock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { @@ -243,15 +304,19 @@ func (m *engineMock) BlockByNumber(ctx context.Context, number *big.Int) (*types m.mu.Lock() defer m.mu.Unlock() + head, err := m.getHeadBlock(ctx) + if err != nil { + return nil, err + } if number == nil { - return m.head, nil + return head, nil } - if number.Cmp(m.head.Number()) != 0 { + if number.Cmp(head.Number()) != 0 { return nil, errors.New("block not found") // Only support latest block } - return m.head, nil + return head, nil } func (m *engineMock) NewPayloadV3(ctx context.Context, params engine.ExecutableData, _ []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { @@ -262,6 +327,8 @@ func (m *engineMock) NewPayloadV3(ctx 
context.Context, params engine.ExecutableD m.mu.Lock() defer m.mu.Unlock() + // if Withdrawals is nil, cannot rlp encode and decode properly. + params.Withdrawals = make([]*types.Withdrawal, 0) args := payloadArgs{ params: params, beaconRoot: beaconRoot, @@ -300,9 +367,14 @@ func (m *engineMock) ForkchoiceUpdatedV3(ctx context.Context, update engine.Fork }, } + head, err := m.getHeadBlock(ctx) + if err != nil { + return engine.ForkChoiceResponse{}, err + } + // Maybe update head //nolint: nestif // this is a mock it's fine - if m.head.Hash() != update.HeadBlockHash { + if head.Hash() != update.HeadBlockHash { var found bool for _, args := range m.payloads { block, err := engine.ExecutableDataToBlock(args.params, nil, args.beaconRoot) @@ -314,11 +386,13 @@ func (m *engineMock) ForkchoiceUpdatedV3(ctx context.Context, update engine.Fork continue } - if err := verifyChild(m.head, block); err != nil { + if err := verifyChild(head, block); err != nil { return engine.ForkChoiceResponse{}, err } - m.head = block + if err := m.setHeadBlock(ctx, block); err != nil { + return engine.ForkChoiceResponse{}, err + } found = true id, err := MockPayloadID(args.params, args.beaconRoot) @@ -331,13 +405,13 @@ func (m *engineMock) ForkchoiceUpdatedV3(ctx context.Context, update engine.Fork } if !found { return engine.ForkChoiceResponse{}, errors.New("forkchoice block not found", - log.Hex7("forkchoice", m.head.Hash().Bytes())) + log.Hex7("forkchoice", head.Hash().Bytes())) } } // If we have payload attributes, make a new payload if attrs != nil { - payload, err := MakePayload(m.fuzzer, m.head.NumberU64()+1, + payload, err := MakePayload(m.fuzzer, head.NumberU64()+1, attrs.Timestamp, update.HeadBlockHash, attrs.SuggestedFeeRecipient, attrs.Random, attrs.BeaconRoot) if err != nil { return engine.ForkChoiceResponse{}, err @@ -356,7 +430,7 @@ func (m *engineMock) ForkchoiceUpdatedV3(ctx context.Context, update engine.Fork } log.Debug(ctx, "Engine mock forkchoice updated", - "height", m.head.NumberU64(), + "height", head.NumberU64(), log.Hex7("forkchoice", update.HeadBlockHash.Bytes()), ) @@ -410,7 +484,7 @@ func MakePayload(fuzzer *fuzz.Fuzzer, height uint64, timestamp uint64, parentHas header.ParentBeaconRoot = beaconRoot // Convert header to block - block := types.NewBlock(&header, nil, nil, trie.NewStackTrie(nil)) + block := types.NewBlock(&header, &types.Body{Withdrawals: make([]*types.Withdrawal, 0)}, nil, trie.NewStackTrie(nil)) // Convert block to payload env := engine.BlockToExecutableData(block, big.NewInt(0), nil) From 71cb8aa1e6b30f18d8792876d11a0c417fc53cfc Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:17:33 +0900 Subject: [PATCH 20/29] feat(cmd): one block rollback (#157) add cometBFT's one block rollback cmd --- client/app/start.go | 44 ++++++++++++++++++++++++++++++ client/cmd/cmd.go | 60 +++++++++++++++++++++++++++++++++++++++++ client/cmd/flags.go | 4 +++ client/config/config.go | 1 + 4 files changed, 109 insertions(+) diff --git a/client/app/start.go b/client/app/start.go index ed9c8f36..5cd677ef 100644 --- a/client/app/start.go +++ b/client/app/start.go @@ -184,6 +184,50 @@ func Start(ctx context.Context, cfg Config) (func(context.Context) error, error) }, nil } +// TODO: Refactor CreateApp() to be used within the Start function, as most of the code originates from there. 
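CreateApp duplicates the application wiring that Start performs (hence the TODO above) so that the rollback command added later in this patch can obtain a fully built App, and in particular its CommitMultiStore, without launching CometBFT. A condensed view of how the command uses it, with flag parsing, logging and error wrapping trimmed; the calls and names are taken from the patch itself:

a := app.CreateApp(ctx, app.Config{Config: storyCfg, Comet: cometCfg})

// CometBFT state: overwrite the state at height n with the state at n-1.
// RemoveBlock ("--hard") additionally deletes block n itself.
height, hash, err := cmtcmd.RollbackState(&cometCfg, storyCfg.RemoveBlock)
if err != nil {
	return err
}

// Cosmos SDK multistore: roll application state back to the same version, so that
// on restart block n (if it was kept) is simply re-executed.
if err := a.CommitMultiStore().RollbackToVersion(height); err != nil {
	return err
}

fmt.Printf("Rolled back state to height %d and hash %X", height, hash)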
+func CreateApp(ctx context.Context, cfg Config) *App { + privVal, err := loadPrivVal(cfg) + if err != nil { + panic(errors.Wrap(err, "load validator key")) + } + + db, err := dbm.NewDB("application", cfg.BackendType(), cfg.DataDir()) + if err != nil { + panic(errors.Wrap(err, "create db")) + } + + baseAppOpts, err := makeBaseAppOpts(cfg) + if err != nil { + panic(errors.Wrap(err, "make base app opts")) + } + + engineCl, err := newEngineClient(ctx, cfg) + if err != nil { + panic(err) + } + + //nolint:contextcheck // False positive + app, err := newApp( + newSDKLogger(ctx), + db, + engineCl, + baseAppOpts..., + ) + if err != nil { + panic(errors.Wrap(err, "create app")) + } + app.Keepers.EVMEngKeeper.SetBuildDelay(cfg.EVMBuildDelay) + app.Keepers.EVMEngKeeper.SetBuildOptimistic(cfg.EVMBuildOptimistic) + + addr, err := k1util.PubKeyToAddress(privVal.Key.PrivKey.PubKey()) + if err != nil { + panic(errors.Wrap(err, "convert validator pubkey to address")) + } + app.Keepers.EVMEngKeeper.SetValidatorAddress(addr) + + return app +} + func newCometNode(ctx context.Context, cfg *cmtcfg.Config, app *App, privVal cmttypes.PrivValidator, ) (*node.Node, error) { nodeKey, err := p2p.LoadOrGenNodeKey(cfg.NodeKeyFile()) diff --git a/client/cmd/cmd.go b/client/cmd/cmd.go index 9d742e7e..295ac820 100644 --- a/client/cmd/cmd.go +++ b/client/cmd/cmd.go @@ -3,13 +3,16 @@ package cmd import ( "context" + "fmt" + cmtcmd "github.com/cometbft/cometbft/cmd/cometbft/commands" "github.com/spf13/cobra" "github.com/piplabs/story/client/app" storycfg "github.com/piplabs/story/client/config" "github.com/piplabs/story/lib/buildinfo" libcmd "github.com/piplabs/story/lib/cmd" + "github.com/piplabs/story/lib/errors" "github.com/piplabs/story/lib/log" ) @@ -23,6 +26,7 @@ func New() *cobra.Command { buildinfo.NewVersionCmd(), newValidatorCmds(), newStatusCmd(), + newRollbackCmd(app.CreateApp), ) } @@ -60,3 +64,59 @@ func newRunCmd(name string, runFunc func(context.Context, app.Config) error) *co return cmd } + +// newRollbackCmd returns a new cobra command that rolls back one block of the story consensus client. +func newRollbackCmd(appCreateFunc func(context.Context, app.Config) *app.App) *cobra.Command { + storyCfg := storycfg.DefaultConfig() + logCfg := log.DefaultConfig() + + cmd := &cobra.Command{ + Use: "rollback", + Short: "rollback Cosmos SDK and CometBFT state by one height", + Long: ` +A state rollback is performed to recover from an incorrect application state transition, +when CometBFT has persisted an incorrect app hash and is thus unable to make +progress. Rollback overwrites a state at height n with the state at height n - 1. +The application also rolls back to height n - 1. No blocks are removed, so upon +restarting CometBFT the transactions in block n will be re-executed against the +application. 
+`, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx, err := log.Init(cmd.Context(), logCfg) + if err != nil { + return err + } + if err := libcmd.LogFlags(ctx, cmd.Flags()); err != nil { + return err + } + + cometCfg, err := parseCometConfig(ctx, storyCfg.HomeDir) + if err != nil { + return err + } + + app := appCreateFunc(ctx, app.Config{ + Config: storyCfg, + Comet: cometCfg, + }) + height, hash, err := cmtcmd.RollbackState(&cometCfg, storyCfg.RemoveBlock) + if err != nil { + return errors.Wrap(err, "failed to rollback CometBFT state") + } + + if err = app.CommitMultiStore().RollbackToVersion(height); err != nil { + return errors.Wrap(err, "failed to rollback to version") + } + + fmt.Printf("Rolled back state to height %d and hash %X", height, hash) + + return nil + }, + } + + bindRunFlags(cmd, &storyCfg) + bindRollbackFlags(cmd, &storyCfg) + log.BindFlags(cmd.Flags(), &logCfg) + + return cmd +} diff --git a/client/cmd/flags.go b/client/cmd/flags.go index b787b9dc..b0de2471 100644 --- a/client/cmd/flags.go +++ b/client/cmd/flags.go @@ -120,6 +120,10 @@ func bindStatusFlags(flags *pflag.FlagSet, cfg *StatusConfig) { libcmd.BindHomeFlag(flags, &cfg.HomeDir) } +func bindRollbackFlags(cmd *cobra.Command, cfg *config.Config) { + cmd.Flags().BoolVar(&cfg.RemoveBlock, "hard", false, "remove last block as well as state") +} + // Flag Validation func validateFlags(flags map[string]string) error { diff --git a/client/config/config.go b/client/config/config.go index c9bba36b..0a6dda76 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -160,6 +160,7 @@ type Config struct { ExternalAddress string Seeds string SeedMode bool + RemoveBlock bool // See cosmos-sdk/server/rollback.go } // ConfigFile returns the default path to the toml story config file. From 4dc73c8476cb29f22a8f83d7c4a937862c5cfa4c Mon Sep 17 00:00:00 2001 From: zsystm <124245155+zsystm@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:36:28 +0900 Subject: [PATCH 21/29] test(evmengine): add test cases for abci (#143) increased coverage to 83.8% added mock getPayloadV3Func for mocking getPayloadV3 --- .../x/evmengine/keeper/abci_internal_test.go | 297 +++++++++++++++++- 1 file changed, 286 insertions(+), 11 deletions(-) diff --git a/client/x/evmengine/keeper/abci_internal_test.go b/client/x/evmengine/keeper/abci_internal_test.go index 21e00c26..0206a04c 100644 --- a/client/x/evmengine/keeper/abci_internal_test.go +++ b/client/x/evmengine/keeper/abci_internal_test.go @@ -40,8 +40,11 @@ import ( moduletestutil "github.com/piplabs/story/client/x/evmengine/testutil" etypes "github.com/piplabs/story/client/x/evmengine/types" + "github.com/piplabs/story/lib/errors" "github.com/piplabs/story/lib/ethclient" "github.com/piplabs/story/lib/ethclient/mock" + "github.com/piplabs/story/lib/k1util" + "github.com/piplabs/story/lib/tutil" "go.uber.org/mock/gomock" ) @@ -51,6 +54,7 @@ var zeroAddr common.Address func TestKeeper_PrepareProposal(t *testing.T) { t.Parallel() + optimisticPayloadHeight := uint64(5) // TestRunErrScenarios tests various error scenarios in the PrepareProposal function. 
// It covers cases where different errors are encountered during the preparation of a proposal, // such as when no transactions are provided, when errors occur while fetching block information, @@ -70,20 +74,44 @@ func TestKeeper_PrepareProposal(t *testing.T) { mockEngine: mockEngineAPI{}, mockClient: mock.MockClient{}, req: &abci.RequestPrepareProposal{ - Txs: nil, // Set to nil to simulate no transactions - Height: 1, // Set height to 1 for this test case - Time: time.Now(), // Set time to current time or mock a time + Txs: nil, // Set to nil to simulate no transactions + Height: 1, // Set height to 1 for this test case + Time: time.Now(), // Set time to current time or mock a time + MaxTxBytes: cmttypes.MaxBlockSizeBytes, }, wantErr: false, }, + { + name: "max bytes is less than 9/10 of max block size", + mockEngine: mockEngineAPI{}, + mockClient: mock.MockClient{}, + req: &abci.RequestPrepareProposal{MaxTxBytes: cmttypes.MaxBlockSizeBytes * 1 / 10}, + wantErr: true, + }, { name: "with transactions", mockEngine: mockEngineAPI{}, mockClient: mock.MockClient{}, req: &abci.RequestPrepareProposal{ - Txs: [][]byte{[]byte("tx1")}, // simulate transactions - Height: 1, - Time: time.Now(), + Txs: [][]byte{[]byte("tx1")}, // simulate transactions + Height: 1, + Time: time.Now(), + MaxTxBytes: cmttypes.MaxBlockSizeBytes, + }, + wantErr: true, + }, + { + name: "failed to peek eligible withdrawals", + mockEngine: mockEngineAPI{}, + mockClient: mock.MockClient{}, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, errors.New("failed to peek eligible withdrawals")) + }, + req: &abci.RequestPrepareProposal{ + Txs: nil, // Set to nil to simulate no transactions + Height: 2, + Time: time.Now(), + MaxTxBytes: cmttypes.MaxBlockSizeBytes, }, wantErr: true, }, @@ -111,9 +139,70 @@ func TestKeeper_PrepareProposal(t *testing.T) { }, mockClient: mock.MockClient{}, req: &abci.RequestPrepareProposal{ - Txs: nil, - Height: 2, - Time: time.Now(), + Txs: nil, + Height: 2, + Time: time.Now(), + MaxTxBytes: cmttypes.MaxBlockSizeBytes, + }, + wantErr: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + }, + { + name: "unknown payload", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.VALID, + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: &eengine.PayloadID{0x1}, + }, nil + }, + getPayloadV3Func: func(ctx context.Context, id eengine.PayloadID) (*eengine.ExecutionPayloadEnvelope, error) { + return &eengine.ExecutionPayloadEnvelope{}, errors.New("Unknown payload") + }, + }, + mockClient: mock.MockClient{}, + req: &abci.RequestPrepareProposal{ + Txs: nil, + Height: 2, + Time: time.Now(), + MaxTxBytes: cmttypes.MaxBlockSizeBytes, + }, + wantErr: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + }, + { + name: "optimistic payload exists but unknown payload is returned by EL", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return 
eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.VALID, + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: &eengine.PayloadID{0x1}, + }, nil + }, + getPayloadV3Func: func(ctx context.Context, id eengine.PayloadID) (*eengine.ExecutionPayloadEnvelope, error) { + return &eengine.ExecutionPayloadEnvelope{}, errors.New("Unknown payload") + }, + }, + mockClient: mock.MockClient{}, + req: &abci.RequestPrepareProposal{ + Txs: nil, + Height: int64(optimisticPayloadHeight), + Time: time.Now(), + MaxTxBytes: cmttypes.MaxBlockSizeBytes, }, wantErr: true, setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { @@ -145,8 +234,8 @@ func TestKeeper_PrepareProposal(t *testing.T) { require.NoError(t, err) k.SetValidatorAddress(common.BytesToAddress([]byte("test"))) populateGenesisHead(ctx, t, k) - - tt.req.MaxTxBytes = cmttypes.MaxBlockSizeBytes + // Set an optimistic payload + k.setOptimisticPayload(&eengine.PayloadID{}, optimisticPayloadHeight) _, err = k.PrepareProposal(withRandomErrs(t, ctx), tt.req) if (err != nil) != tt.wantErr { @@ -213,6 +302,187 @@ func TestKeeper_PrepareProposal(t *testing.T) { }) } +func TestKeeper_PostFinalize(t *testing.T) { + t.Parallel() + payloadID := &eengine.PayloadID{0x1} + payloadFailedToSet := func(k *Keeper) { + id, _, _ := k.getOptimisticPayload() + require.Nil(t, id) + } + payloadWellSet := func(k *Keeper) { + id, _, _ := k.getOptimisticPayload() + require.NotNil(t, id) + require.Equal(t, payloadID, id) + } + tests := []struct { + name string + mockEngine mockEngineAPI + mockClient mock.MockClient + wantErr bool + enableOptimistic bool + setupMocks func(esk *moduletestutil.MockEvmStakingKeeper) + postStateCheck func(k *Keeper) + }{ + { + name: "nothing happens when enableOptimistic is false", + mockEngine: mockEngineAPI{}, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: false, + postStateCheck: payloadFailedToSet, + }, + { + name: "fail: peek eligible withdrawals", + mockEngine: mockEngineAPI{}, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, errors.New("failed to peek eligible withdrawals")) + }, + postStateCheck: payloadFailedToSet, + }, + { + name: "fail: EL is syncing", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.SYNCING, + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: payloadID, + }, nil + }, + }, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + postStateCheck: payloadFailedToSet, + }, + { + name: "fail: invalid payload", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.INVALID, + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: payloadID, + }, nil + }, + }, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: true, + 
setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + postStateCheck: payloadFailedToSet, + }, + { + name: "fail: unknown status from EL", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: "unknown status", + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: payloadID, + }, nil + }, + }, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + postStateCheck: payloadFailedToSet, + }, + { + name: "pass", + mockEngine: mockEngineAPI{ + forkchoiceUpdatedV3Func: func(ctx context.Context, update eengine.ForkchoiceStateV1, + payloadAttributes *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) { + return eengine.ForkChoiceResponse{ + PayloadStatus: eengine.PayloadStatusV1{ + Status: eengine.VALID, + LatestValidHash: nil, + ValidationError: nil, + }, + PayloadID: &eengine.PayloadID{0x1}, + }, nil + }, + }, + mockClient: mock.MockClient{}, + wantErr: false, + enableOptimistic: true, + setupMocks: func(esk *moduletestutil.MockEvmStakingKeeper) { + esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil) + }, + postStateCheck: payloadWellSet, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cdc := getCodec(t) + txConfig := authtx.NewTxConfig(cdc, nil) + + ctrl := gomock.NewController(t) + ak := moduletestutil.NewMockAccountKeeper(ctrl) + esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) + uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + + if tt.setupMocks != nil { + tt.setupMocks(esk) + } + + var err error + + cmtAPI := newMockCometAPI(t, nil) + // set the header and proposer so we have the correct next proposer + header := cmtproto.Header{Height: 1, AppHash: tutil.RandomHash().Bytes()} + header.ProposerAddress = cmtAPI.validatorSet.Validators[0].Address + nxtAddr, err := k1util.PubKeyToAddress(cmtAPI.validatorSet.Validators[1].PubKey) + require.NoError(t, err) + ctx, storeKey, storeService := setupCtxStore(t, &header) + ctx = ctx.WithExecMode(sdk.ExecModeFinalize) + tt.mockEngine.EngineClient, err = ethclient.NewEngineMock(storeKey) + require.NoError(t, err) + + k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk) + require.NoError(t, err) + k.SetCometAPI(cmtAPI) + k.SetValidatorAddress(nxtAddr) + populateGenesisHead(ctx, t, k) + k.buildOptimistic = tt.enableOptimistic + + err = k.PostFinalize(ctx) + if (err != nil) != tt.wantErr { + t.Errorf("PostFinalize() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.postStateCheck != nil { + tt.postStateCheck(k) + } + }) + } +} + // appendMsgToTx appends the given message to the unpacked transaction and returns the new packed transaction bytes. 
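The PostFinalize table above is as much documentation as test: every case sets wantErr to false, and pass versus fail is expressed entirely through whether an optimistic payload ID ends up recorded. A compressed reading of the flow, inferred from the calls visible in this diff (buildOptimistic, setOptimisticPayload, getOptimisticPayload) rather than from the keeper implementation:

// buildOptimistic disabled            -> return early; no payload recorded.
// PeekEligibleWithdrawals fails       -> give up quietly; no payload recorded.
// ForkchoiceUpdatedV3 not VALID       -> SYNCING, INVALID or unknown status; no payload recorded.
// ForkchoiceUpdatedV3 VALID           -> setOptimisticPayload(resp.PayloadID, nextHeight), which
//                                        PrepareProposal at that height can reuse via GetPayloadV3
//                                        instead of requesting a fresh build.
//
// In every case exercised here PostFinalize returns nil: optimistic building is
// best effort and is not allowed to fail the finalized block.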
func appendMsgToTx(t *testing.T, txConfig client.TxConfig, txBytes []byte, msg sdk.Msg) []byte { t.Helper() @@ -303,6 +573,7 @@ type mockEngineAPI struct { mock ethclient.EngineClient // avoid repeating the implementation but also allow for custom implementations of mocks headerByTypeFunc func(context.Context, ethclient.HeadType) (*types.Header, error) forkchoiceUpdatedV3Func func(context.Context, eengine.ForkchoiceStateV1, *eengine.PayloadAttributes) (eengine.ForkChoiceResponse, error) + getPayloadV3Func func(context.Context, eengine.PayloadID) (*eengine.ExecutionPayloadEnvelope, error) newPayloadV3Func func(context.Context, eengine.ExecutableData, []common.Hash, *common.Hash) (eengine.PayloadStatusV1, error) // forceInvalidNewPayloadV3 forces the NewPayloadV3 returns an invalid status. forceInvalidNewPayloadV3 bool @@ -443,6 +714,10 @@ func (m *mockEngineAPI) ForkchoiceUpdatedV3(ctx context.Context, update eengine. } func (m *mockEngineAPI) GetPayloadV3(ctx context.Context, payloadID eengine.PayloadID) (*eengine.ExecutionPayloadEnvelope, error) { + if m.getPayloadV3Func != nil { + return m.getPayloadV3Func(ctx, payloadID) + } + return m.mock.GetPayloadV3(ctx, payloadID) } From aa5040f4613723439a0160f86da700a8cf81fa6c Mon Sep 17 00:00:00 2001 From: Narangde Date: Mon, 7 Oct 2024 12:59:26 +0900 Subject: [PATCH 22/29] feat(genesis): disable vote extension by default (#173) --- client/genutil/defaults.go | 1 - client/genutil/genutil_internal_test.go | 2 +- lib/netconf/iliad/genesis.json | 2 +- lib/netconf/local/genesis.json | 2 +- lib/tutil/testdata/genesis.json | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/client/genutil/defaults.go b/client/genutil/defaults.go index 75f2c39f..93b55e0c 100644 --- a/client/genutil/defaults.go +++ b/client/genutil/defaults.go @@ -5,7 +5,6 @@ import "github.com/cometbft/cometbft/types" // DefaultConsensusParams returns the default cometBFT consensus params for story protocol. func DefaultConsensusParams() *types.ConsensusParams { resp := types.DefaultConsensusParams() - resp.ABCI.VoteExtensionsEnableHeight = 1 // Enable vote extensions from the start. resp.Validator.PubKeyTypes = []string{types.ABCIPubKeyTypeSecp256k1} // Only k1 keys. resp.Block.MaxBytes = -1 // Disable max block bytes, since we MUST include the whole EVM block, which is limited by max gas per block. 
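With the override gone, VoteExtensionsEnableHeight keeps the CometBFT default of 0, which means vote extensions are never enabled; the genesis files updated below simply mirror that. A minimal sketch of the resulting defaults, not from the patch, using the same cometbft types package as defaults.go (the upgradeHeight name at the end is purely illustrative):

params := types.DefaultConsensusParams()
params.Validator.PubKeyTypes = []string{types.ABCIPubKeyTypeSecp256k1} // only secp256k1 keys
params.Block.MaxBytes = -1 // block size is bounded by gas, not bytes

// No longer overridden, so this stays at 0: vote extensions disabled.
_ = params.ABCI.VoteExtensionsEnableHeight

// Re-enabling them later would mean picking a future height here (and, on a live
// chain, shipping it as a consensus-params update):
// params.ABCI.VoteExtensionsEnableHeight = upgradeHeight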
diff --git a/client/genutil/genutil_internal_test.go b/client/genutil/genutil_internal_test.go index 508a843f..931c3a02 100644 --- a/client/genutil/genutil_internal_test.go +++ b/client/genutil/genutil_internal_test.go @@ -15,7 +15,7 @@ import ( func TestDefaultConsensusParams(t *testing.T) { t.Parallel() cons := defaultConsensusGenesis() - require.EqualValues(t, 1, cons.Params.ABCI.VoteExtensionsEnableHeight) + require.EqualValues(t, 0, cons.Params.ABCI.VoteExtensionsEnableHeight) require.EqualValues(t, types.ABCIPubKeyTypeSecp256k1, cons.Params.Validator.PubKeyTypes[0]) require.EqualValues(t, -1, cons.Params.Block.MaxBytes) require.EqualValues(t, -1, cons.Params.Block.MaxGas) diff --git a/lib/netconf/iliad/genesis.json b/lib/netconf/iliad/genesis.json index 1851eb7c..20660b49 100644 --- a/lib/netconf/iliad/genesis.json +++ b/lib/netconf/iliad/genesis.json @@ -19,7 +19,7 @@ "app": "0" }, "abci": { - "vote_extensions_enable_height": "1" + "vote_extensions_enable_height": "0" } }, "app_hash": "", diff --git a/lib/netconf/local/genesis.json b/lib/netconf/local/genesis.json index 22c1e0a0..bd2bbf5e 100644 --- a/lib/netconf/local/genesis.json +++ b/lib/netconf/local/genesis.json @@ -19,7 +19,7 @@ "app": "0" }, "abci": { - "vote_extensions_enable_height": "1" + "vote_extensions_enable_height": "0" } }, "app_hash": "", diff --git a/lib/tutil/testdata/genesis.json b/lib/tutil/testdata/genesis.json index 95180ec2..72fc8a12 100644 --- a/lib/tutil/testdata/genesis.json +++ b/lib/tutil/testdata/genesis.json @@ -19,7 +19,7 @@ ] }, "abci": { - "vote_extensions_enable_height": "1" + "vote_extensions_enable_height": "0" }, "version": {} }, From e43f5e94ac179695ec81acdb01138c3c8e16287c Mon Sep 17 00:00:00 2001 From: Ze Date: Mon, 7 Oct 2024 19:49:25 -0700 Subject: [PATCH 23/29] refactor(panic): remove unused or unnecessary panic code (#171) --- client/x/evmstaking/types/params.go | 24 ------ client/x/evmstaking/types/params_test.go | 82 -------------------- client/x/evmstaking/types/withdraw.go | 29 -------- client/x/evmstaking/types/withdraw_test.go | 87 ---------------------- lib/errors/errors.go | 2 +- lib/promutil/resetgauge.go | 71 ------------------ lib/promutil/resetgauge_test.go | 76 ------------------- 7 files changed, 1 insertion(+), 370 deletions(-) delete mode 100644 lib/promutil/resetgauge.go delete mode 100644 lib/promutil/resetgauge_test.go diff --git a/client/x/evmstaking/types/params.go b/client/x/evmstaking/types/params.go index b9a8e6ae..bdefd22c 100644 --- a/client/x/evmstaking/types/params.go +++ b/client/x/evmstaking/types/params.go @@ -2,10 +2,6 @@ package types import ( "fmt" - - "github.com/cosmos/cosmos-sdk/codec" - - "github.com/piplabs/story/lib/errors" ) // Staking params default values. @@ -35,26 +31,6 @@ func DefaultParams() Params { ) } -// unmarshal the current params value from store key or panic. -func MustUnmarshalParams(cdc *codec.LegacyAmino, value []byte) Params { - params, err := UnmarshalParams(cdc, value) - if err != nil { - panic(err) - } - - return params -} - -// unmarshal the current params value from store key. 
-func UnmarshalParams(cdc *codec.LegacyAmino, value []byte) (params Params, err error) { - err = cdc.Unmarshal(value, ¶ms) - if err != nil { - return params, errors.Wrap(err, "unmarshal params") - } - - return params, nil -} - func ValidateMaxWithdrawalPerBlock(v uint32) error { if v == 0 { return fmt.Errorf("max withdrawal per block must be positive: %d", v) diff --git a/client/x/evmstaking/types/params_test.go b/client/x/evmstaking/types/params_test.go index dab1c8cd..5c7019eb 100644 --- a/client/x/evmstaking/types/params_test.go +++ b/client/x/evmstaking/types/params_test.go @@ -37,88 +37,6 @@ func (suite *ParamsTestSuite) TestDefaultParams() { require.Equal(types.DefaultMinPartialWithdrawalAmount, params.MinPartialWithdrawalAmount) } -func (suite *ParamsTestSuite) TestMustUnmarshalParams() { - require := suite.Require() - maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount := uint32(1), uint32(2), uint64(3) - params := types.NewParams(maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount) - - tcs := []struct { - name string - input []byte - expected types.Params - expectPanic bool - }{ - { - name: "Unmarshal valid params bytes", - input: suite.encConf.Codec.MustMarshal(¶ms), - expected: types.Params{ - MaxWithdrawalPerBlock: maxWithdrawalPerBlock, - MaxSweepPerBlock: maxSweepPerBlock, - MinPartialWithdrawalAmount: minPartialWithdrawalAmount, - }, - }, - { - name: "Unmarshal invalid params bytes", - input: []byte{0x1, 0x2, 0x3}, - expectPanic: true, - }, - } - - for _, tc := range tcs { - suite.Run(tc.name, func() { - if tc.expectPanic { - require.Panics(func() { - types.MustUnmarshalParams(suite.encConf.Amino, tc.input) - }) - } else { - params := types.MustUnmarshalParams(suite.encConf.Amino, tc.input) - require.Equal(tc.expected, params) - } - }) - } -} - -func (suite *ParamsTestSuite) TestUnmarshalParams() { - require := suite.Require() - maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount := uint32(1), uint32(2), uint64(3) - params := types.NewParams(maxWithdrawalPerBlock, maxSweepPerBlock, minPartialWithdrawalAmount) - - tcs := []struct { - name string - input []byte - expected types.Params - expectedError string - }{ - { - name: "Unmarshal valid params bytes", - input: suite.encConf.Codec.MustMarshal(¶ms), - expected: types.Params{ - MaxWithdrawalPerBlock: maxWithdrawalPerBlock, - MaxSweepPerBlock: maxSweepPerBlock, - MinPartialWithdrawalAmount: minPartialWithdrawalAmount, - }, - }, - { - name: "Unmarshal invalid params bytes", - input: []byte{0x1, 0x2, 0x3}, - expectedError: "unmarshal params", - }, - } - - for _, tc := range tcs { - suite.Run(tc.name, func() { - params, err := types.UnmarshalParams(suite.encConf.Amino, tc.input) - if tc.expectedError != "" { - require.Error(err) - require.Contains(err.Error(), tc.expectedError) - } else { - require.NoError(err) - require.Equal(tc.expected, params) - } - }) - } -} - func (suite *ParamsTestSuite) TestValidateMaxWithdrawalPerBlock() { require := suite.Require() diff --git a/client/x/evmstaking/types/withdraw.go b/client/x/evmstaking/types/withdraw.go index 00bc1994..60893a4b 100644 --- a/client/x/evmstaking/types/withdraw.go +++ b/client/x/evmstaking/types/withdraw.go @@ -4,10 +4,6 @@ import ( "strings" "cosmossdk.io/core/address" - - "github.com/cosmos/cosmos-sdk/codec" - - "github.com/piplabs/story/lib/errors" ) // Withdrawals is a collection of Withdrawal. 
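The removals in this commit are all of one kind: thin Must*/Unmarshal wrappers around the codec (MustUnmarshalParams and UnmarshalParams above, the withdrawal marshal helpers in the hunk that follows) together with their tests, which the commit message classifies as unused or unnecessary. Any caller that still needs the behaviour can use the codec.BinaryCodec API directly; the snippet below is an illustrative equivalent, not code from the patch. Note that the same commit also changes errors.Wrap further down to return nil instead of panicking on a nil error, so wrapping can be done unconditionally at call sites.

// Illustrative equivalents of the removed helpers, using codec.BinaryCodec directly.
bz := cdc.MustMarshal(&withdrawal) // was types.MustMarshalWithdrawal(cdc, &withdrawal)

var w types.Withdrawal
if err := cdc.Unmarshal(bz, &w); err != nil { // was types.UnmarshalWithdrawal(cdc, bz)
	return types.Withdrawal{}, errors.Wrap(err, "unmarshal withdrawal")
}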
@@ -48,28 +44,3 @@ func NewWithdrawalFromMsg(msg *MsgAddWithdrawal) Withdrawal { Amount: msg.Withdrawal.Amount, } } - -func MustMarshalWithdrawal(cdc codec.BinaryCodec, withdrawal *Withdrawal) []byte { - return cdc.MustMarshal(withdrawal) -} - -// MustUnmarshalWithdrawal return the unmarshaled withdrawal from bytes. -// Panics if fails. -func MustUnmarshalWithdrawal(cdc codec.BinaryCodec, value []byte) Withdrawal { - withdrawal, err := UnmarshalWithdrawal(cdc, value) - if err != nil { - panic(err) - } - - return withdrawal -} - -// UnmarshalWithdrawal returns the withdrawal. -func UnmarshalWithdrawal(cdc codec.BinaryCodec, value []byte) (withdrawal Withdrawal, err error) { - err = cdc.Unmarshal(value, &withdrawal) - if err != nil { - return withdrawal, errors.Wrap(err, "unmarshal withdrawal") - } - - return withdrawal, nil -} diff --git a/client/x/evmstaking/types/withdraw_test.go b/client/x/evmstaking/types/withdraw_test.go index db181212..29f873b4 100644 --- a/client/x/evmstaking/types/withdraw_test.go +++ b/client/x/evmstaking/types/withdraw_test.go @@ -109,93 +109,6 @@ func (suite *WithdrawTestSuite) TestNewWithdrawalFromMsg() { require.Equal(withdrawal, w, "NewWithdrawalFromMsg should return the same withdrawal") } -func (suite *WithdrawTestSuite) TestMustMarshalWithdraw() { - require := suite.Require() - withdrawal := types.NewWithdrawal(1, suite.delAddr, suite.valAddr, suite.evmAddr.String(), 1) - require.NotPanics(func() { - marshaled := types.MustMarshalWithdrawal(suite.encConf.Codec, &withdrawal) - require.NotNil(marshaled, "MarshalWithdrawal should not return nil") - }) -} - -func (suite *WithdrawTestSuite) TestUnmarshalWithdraw() { - require := suite.Require() - withdrawal := types.NewWithdrawal(1, suite.delAddr, suite.valAddr, suite.evmAddr.String(), 1) - - testCases := []struct { - name string - input []byte - expectedResult types.Withdrawal - expectError bool - }{ - { - name: "Unmarshal valid withdrawal bytes", - input: types.MustMarshalWithdrawal(suite.encConf.Codec, &withdrawal), - expectedResult: types.NewWithdrawal(1, suite.delAddr, suite.valAddr, suite.evmAddr.String(), 1), - expectError: false, - }, - { - name: "Unmarshal invalid withdrawal bytes", - input: []byte{1}, - expectedResult: types.Withdrawal{}, // Expecting an empty struct since it will fail - expectError: true, - }, - } - - // Iterate over test cases - for _, tc := range testCases { - suite.Run(tc.name, func() { - result, err := types.UnmarshalWithdrawal(suite.encConf.Codec, tc.input) - if tc.expectError { - require.Error(err) - } else { - require.NoError(err) - require.Equal(tc.expectedResult, result, "UnmarshalWithdrawal should return the correct withdrawal") - } - }) - } -} - -func (suite *WithdrawTestSuite) TestMustUnmarshalWithdraw() { - require := suite.Require() - withdrawal := types.NewWithdrawal(1, suite.delAddr, suite.valAddr, suite.evmAddr.String(), 1) - - testCases := []struct { - name string - input []byte - expected types.Withdrawal - expectPanic bool - }{ - { - name: "Unmarshal valid withdrawal bytes", - input: types.MustMarshalWithdrawal(suite.encConf.Codec, &withdrawal), - expected: types.NewWithdrawal(1, suite.delAddr, suite.valAddr, suite.evmAddr.String(), 1), - expectPanic: false, - }, - { - name: "Unmarshal invalid withdrawal bytes - panic", - input: []byte{1}, - expectPanic: true, - }, - } - - // Iterate over test cases - for _, tc := range testCases { - suite.Run(tc.name, func() { - if tc.expectPanic { - require.Panics(func() { - types.MustUnmarshalWithdrawal(suite.encConf.Codec, 
tc.input) - }) - } else { - require.NotPanics(func() { - result := types.MustUnmarshalWithdrawal(suite.encConf.Codec, tc.input) - require.Equal(tc.expected, result, "MustUnmarshalWithdrawal should return the correct withdrawal") - }) - } - }) - } -} - func TestWithdrawalTestSuite(t *testing.T) { t.Parallel() suite.Run(t, new(WithdrawTestSuite)) diff --git a/lib/errors/errors.go b/lib/errors/errors.go index 842b1ec2..3eb62630 100644 --- a/lib/errors/errors.go +++ b/lib/errors/errors.go @@ -22,7 +22,7 @@ func New(msg string, attrs ...any) error { //nolint:inamedparam // This function does custom wrapping and errors. func Wrap(err error, msg string, attrs ...any) error { if err == nil { - panic("wrap nil error") + return nil } // Support error types that do their own wrapping. diff --git a/lib/promutil/resetgauge.go b/lib/promutil/resetgauge.go deleted file mode 100644 index 17706087..00000000 --- a/lib/promutil/resetgauge.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright © 2022-2023 Obol Labs Inc. Licensed under the terms of a Business Source License 1.1 - -// Package promutil provides Prometheus utilities. -// This was copied from Obol's Charon repo. -package promutil - -import ( - "strings" - "sync" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -const separator = "|" - -// NewResetGaugeVec creates a new ResetGaugeVec. -func NewResetGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *ResetGaugeVec { - return &ResetGaugeVec{ - inner: promauto.NewGaugeVec(opts, labelNames), - labels: make(map[string]bool), - } -} - -// ResetGaugeVec is a GaugeVec that can be reset which deletes all previously set labels. -// This is useful to clear out labels that are no longer present. -type ResetGaugeVec struct { - inner *prometheus.GaugeVec - - mu sync.Mutex - labels map[string]bool -} - -func (g *ResetGaugeVec) WithLabelValues(lvs ...string) prometheus.Gauge { - for _, lv := range lvs { - if strings.Contains(lv, separator) { - panic("label value cannot contain separator") - } - } - - g.mu.Lock() - defer g.mu.Unlock() - - g.labels[strings.Join(lvs, separator)] = true - - return g.inner.WithLabelValues(lvs...) -} - -// Reset deletes all previously set labels that match all the given label values. -// An empty slice will delete all previously set labels. -func (g *ResetGaugeVec) Reset(lvs ...string) { - g.mu.Lock() - defer g.mu.Unlock() - - for label := range g.labels { - match := true - for _, check := range lvs { - if !strings.Contains(label, check) { - match = false - break - } - } - - if !match { - continue - } - - g.inner.DeleteLabelValues(strings.Split(label, separator)...) - delete(g.labels, label) - } -} diff --git a/lib/promutil/resetgauge_test.go b/lib/promutil/resetgauge_test.go deleted file mode 100644 index f6450509..00000000 --- a/lib/promutil/resetgauge_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright © 2022-2023 Obol Labs Inc. Licensed under the terms of a Business Source License 1.1 - -package promutil_test - -import ( - "testing" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - - "github.com/piplabs/story/lib/promutil" -) - -//nolint:paralleltest // This test uses global prometheus registry so concurrent tests are not safe. 
-func TestResetGaugeVec(t *testing.T) { - const resetTest = "reset_test" - - var testResetGauge = promutil.NewResetGaugeVec(prometheus.GaugeOpts{ - Name: resetTest, - Help: "", - }, []string{"label0", "label1"}) - - testResetGauge.WithLabelValues("1", "a").Set(0) - assertVecLen(t, resetTest, 1) - - // Same labels, should not increase length - testResetGauge.WithLabelValues("1", "a").Set(1) - assertVecLen(t, resetTest, 1) - - testResetGauge.WithLabelValues("2", "b").Set(2) - assertVecLen(t, resetTest, 2) - - testResetGauge.Reset() - assertVecLen(t, resetTest, 0) - - testResetGauge.WithLabelValues("3", "c").Set(3) - assertVecLen(t, resetTest, 1) - - testResetGauge.WithLabelValues("3", "d").Set(3) - assertVecLen(t, resetTest, 2) - - testResetGauge.WithLabelValues("3", "e").Set(3) - assertVecLen(t, resetTest, 3) - - testResetGauge.WithLabelValues("4", "z").Set(4) - assertVecLen(t, resetTest, 4) - - testResetGauge.Reset("3", "c") - assertVecLen(t, resetTest, 3) - - testResetGauge.Reset("3") - assertVecLen(t, resetTest, 1) -} - -func assertVecLen(t *testing.T, name string, l int) { //nolint:unparam // abstracting name is fine even though it is always currently constant - t.Helper() - - metrics, err := prometheus.DefaultGatherer.Gather() - require.NoError(t, err) - - for _, metricFam := range metrics { - if metricFam.GetName() != name { - continue - } - - require.Len(t, metricFam.GetMetric(), l) - - return - } - - if l == 0 { - return - } - - require.Fail(t, "metric not found") -} From c7ee3a2687257800ef85ce0e57b27d111e51af97 Mon Sep 17 00:00:00 2001 From: Zerui Ge Date: Tue, 8 Oct 2024 10:59:36 -0700 Subject: [PATCH 24/29] feat(mint): customized mint module (#169) * feat(mint): port mint module from cosmos sdk v0.50.7 * feat(mint): port mint module from cosmos sdk v0.50.7 * feat(mint): port mint module from cosmos sdk v0.50.7 * feat(mint): port mint module from cosmos sdk v0.50.7 * feat(mint): remove useless parameters * feat(mint): new inflation function * feat(mint): mint parameters change event log processing * feat(mint): mint module api * feat(mint): placeholder for param change events * feat(mint): fix unit test * feat(mint): fix based on comments * feat(mint): fix based on comments * feat(mint): update readme * feat(mint): update readme --- client/app/app.go | 7 +- client/app/app_config.go | 6 +- client/app/keepers/types.go | 2 + client/server/mint.go | 28 + client/server/server.go | 3 + .../x/evmengine/keeper/abci_internal_test.go | 10 +- client/x/evmengine/keeper/keeper.go | 3 + .../evmengine/keeper/keeper_internal_test.go | 6 +- client/x/evmengine/keeper/msg_server.go | 3 + .../keeper/msg_server_internal_test.go | 4 +- .../keeper/proposal_server_internal_test.go | 4 +- .../keeper/upgrades_internal_test.go | 3 +- client/x/evmengine/module/depinject.go | 2 + .../testutil/expected_keepers_mocks.go | 37 ++ client/x/evmengine/types/expected_keepers.go | 4 + client/x/mint/README.md | 76 +++ client/x/mint/keeper/abci.go | 49 ++ client/x/mint/keeper/genesis.go | 26 + client/x/mint/keeper/genesis_test.go | 77 +++ client/x/mint/keeper/grpc_query.go | 27 + client/x/mint/keeper/grpc_query_test.go | 76 +++ client/x/mint/keeper/keeper.go | 110 ++++ client/x/mint/keeper/keeper_test.go | 91 +++ client/x/mint/keeper/params.go | 19 + .../x/mint/keeper/set_inflation_parameters.go | 14 + client/x/mint/module/depinject.go | 72 +++ client/x/mint/module/module.go | 136 +++++ client/x/mint/module/module.proto | 16 + client/x/mint/module/module.pulsar.go | 576 ++++++++++++++++++ 
.../x/mint/testutil/expected_keepers_mocks.go | 195 ++++++ client/x/mint/types/codec.go | 19 + client/x/mint/types/events.go | 6 + client/x/mint/types/expected_keepers.go | 32 + client/x/mint/types/genesis.go | 39 ++ client/x/mint/types/genesis.pb.go | 322 ++++++++++ client/x/mint/types/genesis.proto | 14 + client/x/mint/types/inflation_contract.go | 36 ++ client/x/mint/types/keys.go | 17 + client/x/mint/types/mint.pb.go | 393 ++++++++++++ client/x/mint/types/mint.proto | 18 + client/x/mint/types/params.go | 92 +++ client/x/mint/types/query.pb.go | 536 ++++++++++++++++ client/x/mint/types/query.proto | 26 + go.mod | 2 +- 44 files changed, 3219 insertions(+), 15 deletions(-) create mode 100644 client/server/mint.go create mode 100644 client/x/mint/README.md create mode 100644 client/x/mint/keeper/abci.go create mode 100644 client/x/mint/keeper/genesis.go create mode 100644 client/x/mint/keeper/genesis_test.go create mode 100644 client/x/mint/keeper/grpc_query.go create mode 100644 client/x/mint/keeper/grpc_query_test.go create mode 100644 client/x/mint/keeper/keeper.go create mode 100644 client/x/mint/keeper/keeper_test.go create mode 100644 client/x/mint/keeper/params.go create mode 100644 client/x/mint/keeper/set_inflation_parameters.go create mode 100644 client/x/mint/module/depinject.go create mode 100644 client/x/mint/module/module.go create mode 100644 client/x/mint/module/module.proto create mode 100644 client/x/mint/module/module.pulsar.go create mode 100644 client/x/mint/testutil/expected_keepers_mocks.go create mode 100644 client/x/mint/types/codec.go create mode 100644 client/x/mint/types/events.go create mode 100644 client/x/mint/types/expected_keepers.go create mode 100644 client/x/mint/types/genesis.go create mode 100644 client/x/mint/types/genesis.pb.go create mode 100644 client/x/mint/types/genesis.proto create mode 100644 client/x/mint/types/inflation_contract.go create mode 100644 client/x/mint/types/keys.go create mode 100644 client/x/mint/types/mint.pb.go create mode 100644 client/x/mint/types/mint.proto create mode 100644 client/x/mint/types/params.go create mode 100644 client/x/mint/types/query.pb.go create mode 100644 client/x/mint/types/query.proto diff --git a/client/app/app.go b/client/app/app.go index 094aafd6..6927b903 100644 --- a/client/app/app.go +++ b/client/app/app.go @@ -24,6 +24,7 @@ import ( "github.com/piplabs/story/client/app/keepers" "github.com/piplabs/story/client/comet" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" + mintkeeper "github.com/piplabs/story/client/x/mint/keeper" "github.com/piplabs/story/lib/errors" "github.com/piplabs/story/lib/ethclient" @@ -36,7 +37,6 @@ import ( _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects _ "github.com/cosmos/cosmos-sdk/x/genutil" // import for side-effects _ "github.com/cosmos/cosmos-sdk/x/gov" // import for side-effects - _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects ) @@ -95,6 +95,7 @@ func newApp( &app.Keepers.EpochsKeeper, &app.Keepers.EvmStakingKeeper, &app.Keepers.EVMEngKeeper, + &app.Keepers.MintKeeper, ); err != nil { return nil, errors.Wrap(err, "dep inject") } @@ -207,3 +208,7 @@ func (a App) GetDistrKeeper() distrkeeper.Keeper { func (a App) GetUpgradeKeeper() *upgradekeeper.Keeper { return a.Keepers.UpgradeKeeper } + +func (a App) GetMintKeeper() mintkeeper.Keeper { + return 
a.Keepers.MintKeeper +} diff --git a/client/app/app_config.go b/client/app/app_config.go index faee4920..3a5c5b98 100644 --- a/client/app/app_config.go +++ b/client/app/app_config.go @@ -9,7 +9,6 @@ import ( distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1" genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1" govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1" - mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1" slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1" stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1" txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1" @@ -26,7 +25,6 @@ import ( distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -36,6 +34,8 @@ import ( evmenginetypes "github.com/piplabs/story/client/x/evmengine/types" evmstakingmodule "github.com/piplabs/story/client/x/evmstaking/module" evmstakingtypes "github.com/piplabs/story/client/x/evmstaking/types" + mintmodule "github.com/piplabs/story/client/x/mint/module" + minttypes "github.com/piplabs/story/client/x/mint/types" ) // Bech32HRP is the human-readable-part of the Bech32 address format. @@ -215,7 +215,7 @@ var ( }, { Name: minttypes.ModuleName, - Config: appconfig.WrapAny(&mintmodulev1.Module{}), + Config: appconfig.WrapAny(&mintmodule.Module{}), }, }, }) diff --git a/client/app/keepers/types.go b/client/app/keepers/types.go index 7b21abdc..71e85a94 100644 --- a/client/app/keepers/types.go +++ b/client/app/keepers/types.go @@ -18,6 +18,7 @@ import ( epochskeeper "github.com/piplabs/story/client/x/epochs/keeper" evmengkeeper "github.com/piplabs/story/client/x/evmengine/keeper" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" + mintkeeper "github.com/piplabs/story/client/x/mint/keeper" ) // Keepers includes all possible keepers. We separated it into a separate struct to make it easier to scaffold upgrades. @@ -36,4 +37,5 @@ type Keepers struct { EvmStakingKeeper *evmstakingkeeper.Keeper EVMEngKeeper *evmengkeeper.Keeper EpochsKeeper *epochskeeper.Keeper + MintKeeper mintkeeper.Keeper } diff --git a/client/server/mint.go b/client/server/mint.go new file mode 100644 index 00000000..76051229 --- /dev/null +++ b/client/server/mint.go @@ -0,0 +1,28 @@ +package server + +import ( + "net/http" + + "github.com/piplabs/story/client/server/utils" + "github.com/piplabs/story/client/x/mint/keeper" + minttypes "github.com/piplabs/story/client/x/mint/types" +) + +func (s *Server) initMintRoute() { + s.httpMux.HandleFunc("/mint/params", utils.SimpleWrap(s.aminoCodec, s.GetMintParams)) +} + +// GetMintParams queries params of the mint module. 
+func (s *Server) GetMintParams(r *http.Request) (resp any, err error) { + queryContext, err := s.createQueryContextByHeader(r) + if err != nil { + return nil, err + } + + queryResp, err := keeper.NewQuerier(s.store.GetMintKeeper()).Params(queryContext, &minttypes.QueryParamsRequest{}) + if err != nil { + return nil, err + } + + return queryResp, nil +} diff --git a/client/server/server.go b/client/server/server.go index ce31c476..d866c549 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -28,6 +28,7 @@ import ( "github.com/gorilla/mux" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" + mintkeeper "github.com/piplabs/story/client/x/mint/keeper" ) type Store interface { @@ -39,6 +40,7 @@ type Store interface { GetBankKeeper() bankkeeper.Keeper GetDistrKeeper() distrkeeper.Keeper GetUpgradeKeeper() *upgradekeeper.Keeper + GetMintKeeper() mintkeeper.Keeper } type Server struct { @@ -121,6 +123,7 @@ func (s *Server) registerHandle() { s.initSlashingRoute() s.initStakingRoute() s.initUpgradeRoute() + s.initMintRoute() } func (s *Server) createQueryContextByHeader(r *http.Request) (sdk.Context, error) { diff --git a/client/x/evmengine/keeper/abci_internal_test.go b/client/x/evmengine/keeper/abci_internal_test.go index 0206a04c..eca2a1fc 100644 --- a/client/x/evmengine/keeper/abci_internal_test.go +++ b/client/x/evmengine/keeper/abci_internal_test.go @@ -221,6 +221,7 @@ func TestKeeper_PrepareProposal(t *testing.T) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) if tt.setupMocks != nil { tt.setupMocks(esk) @@ -230,7 +231,7 @@ func TestKeeper_PrepareProposal(t *testing.T) { tt.mockEngine.EngineClient, err = ethclient.NewEngineMock(storeKey) require.NoError(t, err) - k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk) + k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) k.SetValidatorAddress(common.BytesToAddress([]byte("test"))) populateGenesisHead(ctx, t, k) @@ -261,11 +262,11 @@ func TestKeeper_PrepareProposal(t *testing.T) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - + mk := moduletestutil.NewMockMintKeeper(ctrl) // Expected call for PeekEligibleWithdrawals esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil).AnyTimes() - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) keeper.SetValidatorAddress(common.BytesToAddress([]byte("test"))) populateGenesisHead(ctx, t, keeper) @@ -446,6 +447,7 @@ func TestKeeper_PostFinalize(t *testing.T) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) if tt.setupMocks != nil { tt.setupMocks(esk) @@ -464,7 +466,7 @@ func TestKeeper_PostFinalize(t *testing.T) { tt.mockEngine.EngineClient, err = ethclient.NewEngineMock(storeKey) require.NoError(t, err) - k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk) + k, err := NewKeeper(cdc, storeService, &tt.mockEngine, &tt.mockClient, txConfig, ak, esk, uk, mk) 
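From here the mint wiring into evmengine is mechanical: every NewKeeper call site, in these tests and (per the diffstat) in the depinject module, gains a trailing mk argument, and the gomock-based tests build it with moduletestutil.NewMockMintKeeper(ctrl). The shape of the new dependency, as implied by the call sites in this patch; the authoritative definition is the small addition to client/x/evmengine/types/expected_keepers.go, which the diffstat lists but this excerpt does not show, and the parameter names below are assumptions:

type MintKeeper interface {
	// Invoked from msgServer.ExecutionPayload right after ProcessUpgradeEvents,
	// with the previous payload's height and its event logs
	// (payload.Number-1 and msg.PrevPayloadEvents in the hunk below).
	ProcessInflationEvents(ctx context.Context, height uint64, logs []*types.EVMEvent) error
}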
require.NoError(t, err) k.SetCometAPI(cmtAPI) k.SetValidatorAddress(nxtAddr) diff --git a/client/x/evmengine/keeper/keeper.go b/client/x/evmengine/keeper/keeper.go index 41e265a3..fe6a6ee7 100644 --- a/client/x/evmengine/keeper/keeper.go +++ b/client/x/evmengine/keeper/keeper.go @@ -41,6 +41,7 @@ type Keeper struct { accountKeeper types.AccountKeeper evmstakingKeeper types.EvmStakingKeeper upgradeKeeper types.UpgradeKeeper + mintKeeper types.MintKeeper upgradeContract *bindings.UpgradeEntrypoint @@ -64,6 +65,7 @@ func NewKeeper( ak types.AccountKeeper, esk types.EvmStakingKeeper, uk types.UpgradeKeeper, + mk types.MintKeeper, ) (*Keeper, error) { schema := &ormv1alpha1.ModuleSchemaDescriptor{SchemaFile: []*ormv1alpha1.ModuleSchemaDescriptor_FileEntry{ {Id: 1, ProtoFileName: File_client_x_evmengine_keeper_evmengine_proto.Path()}, @@ -94,6 +96,7 @@ func NewKeeper( evmstakingKeeper: esk, upgradeKeeper: uk, upgradeContract: upgradeContract, + mintKeeper: mk, }, nil } diff --git a/client/x/evmengine/keeper/keeper_internal_test.go b/client/x/evmengine/keeper/keeper_internal_test.go index 40fd34a9..d9ab3b58 100644 --- a/client/x/evmengine/keeper/keeper_internal_test.go +++ b/client/x/evmengine/keeper/keeper_internal_test.go @@ -49,12 +49,13 @@ func createTestKeeper(t *testing.T) (context.Context, *Keeper) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) ctx, storeKey, storeService := setupCtxStore(t, &header) mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) @@ -78,11 +79,12 @@ func createKeeper(t *testing.T, args args) (sdk.Context, *mockCometAPI, *Keeper) ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) ctx, storeKey, storeService := setupCtxStore(t, &header) mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) keeper.SetValidatorAddress(nxtAddr) diff --git a/client/x/evmengine/keeper/msg_server.go b/client/x/evmengine/keeper/msg_server.go index 01591507..fb77c4c4 100644 --- a/client/x/evmengine/keeper/msg_server.go +++ b/client/x/evmengine/keeper/msg_server.go @@ -115,6 +115,9 @@ func (s msgServer) ExecutionPayload(ctx context.Context, msg *types.MsgExecution if err := s.ProcessUpgradeEvents(ctx, payload.Number-1, msg.PrevPayloadEvents); err != nil { return nil, errors.Wrap(err, "deliver upgrade-related event logs") } + if err := s.mintKeeper.ProcessInflationEvents(ctx, payload.Number-1, msg.PrevPayloadEvents); err != nil { + return nil, errors.Wrap(err, "deliver inflation-related event logs") + } if err := s.updateExecutionHead(ctx, payload); err != nil { return nil, errors.Wrap(err, "update execution head") diff --git a/client/x/evmengine/keeper/msg_server_internal_test.go b/client/x/evmengine/keeper/msg_server_internal_test.go index e2378bca..b3f93cdc 100644 --- 
a/client/x/evmengine/keeper/msg_server_internal_test.go +++ b/client/x/evmengine/keeper/msg_server_internal_test.go @@ -40,6 +40,7 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) cmtAPI := newMockCometAPI(t, nil) // set the header and proposer so we have the correct next proposer @@ -53,7 +54,7 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { evmLogProc := mockLogProvider{deliverErr: errors.New("test error")} mockEngine, err := newMockEngineAPI(storeKey, 2) require.NoError(t, err) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) keeper.SetValidatorAddress(nxtAddr) @@ -99,6 +100,7 @@ func Test_msgServer_ExecutionPayload(t *testing.T) { setup: func(c context.Context) sdk.Context { esk.EXPECT().DequeueEligibleWithdrawals(c).Return(nil, nil) esk.EXPECT().ProcessStakingEvents(c, gomock.Any(), gomock.Any()).Return(nil) + mk.EXPECT().ProcessInflationEvents(c, gomock.Any(), gomock.Any()).Return(nil) return sdk.UnwrapSDKContext(c) }, diff --git a/client/x/evmengine/keeper/proposal_server_internal_test.go b/client/x/evmengine/keeper/proposal_server_internal_test.go index 96c9d5ec..45e590a9 100644 --- a/client/x/evmengine/keeper/proposal_server_internal_test.go +++ b/client/x/evmengine/keeper/proposal_server_internal_test.go @@ -34,14 +34,14 @@ func Test_proposalServer_ExecutionPayload(t *testing.T) { ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) - + mk := moduletestutil.NewMockMintKeeper(ctrl) esk.EXPECT().PeekEligibleWithdrawals(gomock.Any()).Return(nil, nil).AnyTimes() sdkCtx, storeKey, storeService := setupCtxStore(t, &cmtproto.Header{AppHash: tutil.RandomHash().Bytes()}) sdkCtx = sdkCtx.WithExecMode(sdk.ExecModeFinalize) mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) populateGenesisHead(sdkCtx, t, keeper) propSrv := NewProposalServer(keeper) diff --git a/client/x/evmengine/keeper/upgrades_internal_test.go b/client/x/evmengine/keeper/upgrades_internal_test.go index 722c69a1..8aea641f 100644 --- a/client/x/evmengine/keeper/upgrades_internal_test.go +++ b/client/x/evmengine/keeper/upgrades_internal_test.go @@ -262,12 +262,13 @@ func setupTestEnvironment(t *testing.T) (*Keeper, sdk.Context, *gomock.Controlle ak := moduletestutil.NewMockAccountKeeper(ctrl) esk := moduletestutil.NewMockEvmStakingKeeper(ctrl) uk := moduletestutil.NewMockUpgradeKeeper(ctrl) + mk := moduletestutil.NewMockMintKeeper(ctrl) ctx, storeKey, storeService := setupCtxStore(t, &header) mockEngine, err := newMockEngineAPI(storeKey, 0) require.NoError(t, err) - keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk) + keeper, err := NewKeeper(cdc, storeService, &mockEngine, mockClient, txConfig, ak, esk, uk, mk) require.NoError(t, err) keeper.SetCometAPI(cmtAPI) nxtAddr, err := k1util.PubKeyToAddress(cmtAPI.validatorSet.Validators[1].PubKey) diff --git 
a/client/x/evmengine/module/depinject.go b/client/x/evmengine/module/depinject.go index 25b66a39..e35d3dd4 100644 --- a/client/x/evmengine/module/depinject.go +++ b/client/x/evmengine/module/depinject.go @@ -36,6 +36,7 @@ type ModuleInputs struct { AccountKeeper types.AccountKeeper EvmStakingKeeper types.EvmStakingKeeper UpgradeKeeper types.UpgradeKeeper + MintKeeper types.MintKeeper } type ModuleOutputs struct { @@ -56,6 +57,7 @@ func ProvideModule(in ModuleInputs) (ModuleOutputs, error) { in.AccountKeeper, in.EvmStakingKeeper, in.UpgradeKeeper, + in.MintKeeper, ) if err != nil { return ModuleOutputs{}, err diff --git a/client/x/evmengine/testutil/expected_keepers_mocks.go b/client/x/evmengine/testutil/expected_keepers_mocks.go index 3d40b6cc..cd96969d 100644 --- a/client/x/evmengine/testutil/expected_keepers_mocks.go +++ b/client/x/evmengine/testutil/expected_keepers_mocks.go @@ -219,3 +219,40 @@ func (mr *MockUpgradeKeeperMockRecorder) ScheduleUpgrade(ctx, plan any) *gomock. mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleUpgrade", reflect.TypeOf((*MockUpgradeKeeper)(nil).ScheduleUpgrade), ctx, plan) } + +// MockMintKeeper is a mock of MintKeeper interface. +type MockMintKeeper struct { + ctrl *gomock.Controller + recorder *MockMintKeeperMockRecorder +} + +// MockMintKeeperMockRecorder is the mock recorder for MockMintKeeper. +type MockMintKeeperMockRecorder struct { + mock *MockMintKeeper +} + +// NewMockMintKeeper creates a new mock instance. +func NewMockMintKeeper(ctrl *gomock.Controller) *MockMintKeeper { + mock := &MockMintKeeper{ctrl: ctrl} + mock.recorder = &MockMintKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMintKeeper) EXPECT() *MockMintKeeperMockRecorder { + return m.recorder +} + +// ProcessInflationEvents mocks base method. +func (m *MockMintKeeper) ProcessInflationEvents(ctx context.Context, height uint64, logs []*types2.EVMEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProcessInflationEvents", ctx, height, logs) + ret0, _ := ret[0].(error) + return ret0 +} + +// ProcessInflationEvents indicates an expected call of ProcessInflationEvents. 
+func (mr *MockMintKeeperMockRecorder) ProcessInflationEvents(ctx, height, logs any) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessInflationEvents", reflect.TypeOf((*MockMintKeeper)(nil).ProcessInflationEvents), ctx, height, logs)
+}
diff --git a/client/x/evmengine/types/expected_keepers.go b/client/x/evmengine/types/expected_keepers.go
index 59ca321f..401899f8 100644
--- a/client/x/evmengine/types/expected_keepers.go
+++ b/client/x/evmengine/types/expected_keepers.go
@@ -28,3 +28,7 @@ type EvmStakingKeeper interface {
 type UpgradeKeeper interface {
     ScheduleUpgrade(ctx context.Context, plan upgradetypes.Plan) error
 }
+
+type MintKeeper interface {
+    ProcessInflationEvents(ctx context.Context, height uint64, logs []*EVMEvent) error
+}
diff --git a/client/x/mint/README.md b/client/x/mint/README.md
new file mode 100644
index 00000000..600267c3
--- /dev/null
+++ b/client/x/mint/README.md
@@ -0,0 +1,76 @@
+---
+sidebar_position: 1
+---
+
+# `x/mint`
+
+## Contents
+
+- [`x/mint`](#xmint)
+  - [Contents](#contents)
+  - [State](#state)
+    - [Params](#params)
+  - [Begin-Block](#begin-block)
+    - [Inflation amount calculation](#inflation-amount-calculation)
+  - [Parameters](#parameters)
+  - [Events](#events)
+    - [BeginBlocker](#beginblocker)
+
+## State
+
+### Params
+
+The mint module stores its params in state under the `0x01` prefix;
+they can be updated by the authority via a dedicated smart contract.
+
+* Params: `mint/params -> legacy_amino(params)`
+
+```protobuf
+message Params {
+  option (amino.name) = "client/x/mint/Params";
+
+  // type of coin to mint
+  string mint_denom = 1;
+  // inflation amount per year
+  uint64 inflations_per_year = 2;
+  // expected blocks per year
+  uint64 blocks_per_year = 3;
+}
+```
+
+## Begin-Block
+
+The inflation amount is calculated and the newly minted tokens are sent to the fee collector at the beginning of each block.
+
+### Inflation amount calculation
+
+The inflation amount is calculated by an "inflation calculation function" that is
+passed to the `NewAppModule` function. If no function is passed, the default
+(`DefaultInflationCalculationFn`) is used. If custom inflation calculation
+logic is needed, define and pass a function that matches
+`InflationCalculationFn`'s signature.
+
+```go
+type InflationCalculationFn func(ctx context.Context, params Params, bondedRatio math.LegacyDec) math.LegacyDec
+```
+
+## Parameters
+
+The minting module contains the following parameters:
+
+| Key                 | Type            | Example                      |
+|---------------------|-----------------|------------------------------|
+| MintDenom           | string          | "stake"                      |
+| InflationsPerYear   | string (uint64) | "24625000000000000"          |
+| BlocksPerYear       | string (uint64) | "6311520"                    |
+
+
+## Events
+
+The minting module emits the following events:
+
+### BeginBlocker
+
+| Type | Attribute Key     | Attribute Value    |
+|------|-------------------|--------------------|
+| mint | amount            | {amount}           |
diff --git a/client/x/mint/keeper/abci.go b/client/x/mint/keeper/abci.go
new file mode 100644
index 00000000..3fb8198e
--- /dev/null
+++ b/client/x/mint/keeper/abci.go
@@ -0,0 +1,49 @@
+package keeper
+
+import (
+    "context"
+
+    "cosmossdk.io/math"
+
+    "github.com/cosmos/cosmos-sdk/telemetry"
+    sdk "github.com/cosmos/cosmos-sdk/types"
+
+    "github.com/piplabs/story/client/x/mint/types"
+)
+
+// BeginBlocker mints new tokens for the previous block.
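To make the README's `InflationCalculationFn` hook concrete before the `BeginBlocker` body below, here is a minimal sketch of a calculator that spreads `inflations_per_year` evenly across `blocks_per_year` (with the example parameters, 24,625,000,000,000,000 / 6,311,520 ≈ 3.9e9 base units per block). The function name is illustrative, the Go field names are assumed to follow the usual proto-to-Go mapping, and the signature is inferred from how `BeginBlocker` invokes the hook:

```go
// Sketch only: distribute the yearly inflation evenly across the expected
// number of blocks. bondedRatio is ignored, mirroring the current BeginBlocker.
func perBlockInflation(_ context.Context, params types.Params, _ math.LegacyDec) math.LegacyDec {
	perYear := math.LegacyNewDecFromInt(math.NewIntFromUint64(params.InflationsPerYear))
	return perYear.QuoInt64(int64(params.BlocksPerYear)) // ≈ 3.9e9 with the example values above
}
```

Such a function would be handed to `NewAppModule` (or injected through depinject, see the sketch further down) in place of `DefaultInflationCalculationFn`.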
+func (k Keeper) BeginBlocker(ctx context.Context, ic types.InflationCalculationFn) error { + defer telemetry.ModuleMeasureSince(types.ModuleName, telemetry.Now(), telemetry.MetricKeyBeginBlocker) + + params, err := k.Params.Get(ctx) + if err != nil { + return err + } + + // mint coins, update supply + mintedCoinAmt := ic(ctx, params, math.LegacyNewDec(0)) // NOTE: bondedRatio is not used in current implementation. + mintedCoin := sdk.NewCoin(params.MintDenom, mintedCoinAmt.TruncateInt()) + mintedCoins := sdk.NewCoins(mintedCoin) + if err := k.MintCoins(ctx, mintedCoins); err != nil { + return err + } + + // send the minted coins to the fee collector account + if err := k.AddCollectedFees(ctx, mintedCoins); err != nil { + return err + } + + if mintedCoin.Amount.IsInt64() { + defer telemetry.ModuleSetGauge(types.ModuleName, float32(mintedCoin.Amount.Int64()), "minted_tokens") + } + + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeMint, + sdk.NewAttribute(sdk.AttributeKeyAmount, mintedCoin.Amount.String()), + ), + ) + + return nil +} diff --git a/client/x/mint/keeper/genesis.go b/client/x/mint/keeper/genesis.go new file mode 100644 index 00000000..3b12a540 --- /dev/null +++ b/client/x/mint/keeper/genesis.go @@ -0,0 +1,26 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/piplabs/story/client/x/mint/types" +) + +// InitGenesis new mint genesis. +func (k Keeper) InitGenesis(ctx sdk.Context, ak types.AccountKeeper, data *types.GenesisState) { + if err := k.Params.Set(ctx, data.Params); err != nil { + panic(err) + } + + ak.GetModuleAccount(ctx, types.ModuleName) +} + +// ExportGenesis returns a GenesisState for a given context and keeper. +func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + params, err := k.Params.Get(ctx) + if err != nil { + panic(err) + } + + return types.NewGenesisState(params) +} diff --git a/client/x/mint/keeper/genesis_test.go b/client/x/mint/keeper/genesis_test.go new file mode 100644 index 00000000..affa5818 --- /dev/null +++ b/client/x/mint/keeper/genesis_test.go @@ -0,0 +1,77 @@ +//nolint:paralleltest // just for testing +package keeper_test + +import ( + "testing" + + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + + "github.com/piplabs/story/client/x/mint/keeper" + mintmodule "github.com/piplabs/story/client/x/mint/module" + minttestutil "github.com/piplabs/story/client/x/mint/testutil" + "github.com/piplabs/story/client/x/mint/types" +) + +var minterAcc = authtypes.NewEmptyModuleAccount(types.ModuleName, authtypes.Minter) + +type GenesisTestSuite struct { + suite.Suite + + sdkCtx sdk.Context + keeper keeper.Keeper + cdc codec.BinaryCodec + accountKeeper types.AccountKeeper + key *storetypes.KVStoreKey +} + +func TestGenesisTestSuite(t *testing.T) { + suite.Run(t, new(GenesisTestSuite)) +} + +func (s *GenesisTestSuite) SetupTest() { + key := storetypes.NewKVStoreKey(types.StoreKey) + testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test")) + encCfg := moduletestutil.MakeTestEncodingConfig(mintmodule.AppModuleBasic{}) + + // gomock initializations + ctrl := 
gomock.NewController(s.T()) + s.cdc = codec.NewProtoCodec(encCfg.InterfaceRegistry) + s.sdkCtx = testCtx.Ctx + s.key = key + + stakingKeeper := minttestutil.NewMockStakingKeeper(ctrl) + accountKeeper := minttestutil.NewMockAccountKeeper(ctrl) + bankKeeper := minttestutil.NewMockBankKeeper(ctrl) + s.accountKeeper = accountKeeper + accountKeeper.EXPECT().GetModuleAddress(minterAcc.Name).Return(minterAcc.GetAddress()) + accountKeeper.EXPECT().GetModuleAccount(s.sdkCtx, minterAcc.Name).Return(minterAcc) + + s.keeper = keeper.NewKeeper(s.cdc, runtime.NewKVStoreService(key), stakingKeeper, accountKeeper, bankKeeper, "") +} + +func (s *GenesisTestSuite) TestImportExportGenesis() { + genesisState := types.DefaultGenesisState() + genesisState.Params = types.NewParams( + "testDenom", + 24625000_000_000_000, + uint64(60*60*8766/5), + ) + + s.keeper.InitGenesis(s.sdkCtx, s.accountKeeper, genesisState) + + params, err := s.keeper.Params.Get(s.sdkCtx) + s.Require().Equal(genesisState.Params, params) + s.Require().NoError(err) + + genesisState2 := s.keeper.ExportGenesis(s.sdkCtx) + s.Require().Equal(genesisState, genesisState2) +} diff --git a/client/x/mint/keeper/grpc_query.go b/client/x/mint/keeper/grpc_query.go new file mode 100644 index 00000000..de614f23 --- /dev/null +++ b/client/x/mint/keeper/grpc_query.go @@ -0,0 +1,27 @@ +package keeper + +import ( + "context" + + "github.com/piplabs/story/client/x/mint/types" +) + +var _ types.QueryServer = Querier{} + +func NewQuerier(k Keeper) types.QueryServer { + return Querier{k} +} + +type Querier struct { + k Keeper +} + +// Params returns params of the mint module. +func (q Querier) Params(ctx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + params, err := q.k.Params.Get(ctx) + if err != nil { + return nil, err + } + + return &types.QueryParamsResponse{Params: params}, nil +} diff --git a/client/x/mint/keeper/grpc_query_test.go b/client/x/mint/keeper/grpc_query_test.go new file mode 100644 index 00000000..63b433d6 --- /dev/null +++ b/client/x/mint/keeper/grpc_query_test.go @@ -0,0 +1,76 @@ +//nolint:paralleltest // just for testing +package keeper_test + +import ( + gocontext "context" + "testing" + + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + + "github.com/piplabs/story/client/x/mint/keeper" + mintmodule "github.com/piplabs/story/client/x/mint/module" + minttestutil "github.com/piplabs/story/client/x/mint/testutil" + "github.com/piplabs/story/client/x/mint/types" +) + +type MintTestSuite struct { + suite.Suite + + ctx sdk.Context + queryClient types.QueryClient + mintKeeper keeper.Keeper +} + +func (suite *MintTestSuite) SetupTest() { + encCfg := moduletestutil.MakeTestEncodingConfig(mintmodule.AppModuleBasic{}) + key := storetypes.NewKVStoreKey(types.StoreKey) + storeService := runtime.NewKVStoreService(key) + testCtx := testutil.DefaultContextWithDB(suite.T(), key, storetypes.NewTransientStoreKey("transient_test")) + suite.ctx = testCtx.Ctx + + // gomock initializations + ctrl := gomock.NewController(suite.T()) + accountKeeper := minttestutil.NewMockAccountKeeper(ctrl) + bankKeeper := minttestutil.NewMockBankKeeper(ctrl) + stakingKeeper := 
minttestutil.NewMockStakingKeeper(ctrl) + + accountKeeper.EXPECT().GetModuleAddress("mint").Return(sdk.AccAddress{}) + + suite.mintKeeper = keeper.NewKeeper( + encCfg.Codec, + storeService, + stakingKeeper, + accountKeeper, + bankKeeper, + authtypes.FeeCollectorName, + ) + + err := suite.mintKeeper.Params.Set(suite.ctx, types.DefaultParams()) + suite.Require().NoError(err) + + queryHelper := baseapp.NewQueryServerTestHelper(testCtx.Ctx, encCfg.InterfaceRegistry) + types.RegisterQueryServer(queryHelper, keeper.NewQuerier(suite.mintKeeper)) + + suite.queryClient = types.NewQueryClient(queryHelper) +} + +func (suite *MintTestSuite) TestGRPCParams() { + params, err := suite.queryClient.Params(gocontext.Background(), &types.QueryParamsRequest{}) + suite.Require().NoError(err) + kparams, err := suite.mintKeeper.Params.Get(suite.ctx) + suite.Require().NoError(err) + suite.Require().Equal(params.Params, kparams) +} + +func TestMintTestSuite(t *testing.T) { + suite.Run(t, new(MintTestSuite)) +} diff --git a/client/x/mint/keeper/keeper.go b/client/x/mint/keeper/keeper.go new file mode 100644 index 00000000..0f1d9394 --- /dev/null +++ b/client/x/mint/keeper/keeper.go @@ -0,0 +1,110 @@ +//nolint:revive // just use interface{} +package keeper + +import ( + "context" + "fmt" + + "cosmossdk.io/collections" + storetypes "cosmossdk.io/core/store" + "cosmossdk.io/log" + "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + evmenginetypes "github.com/piplabs/story/client/x/evmengine/types" + "github.com/piplabs/story/client/x/mint/types" + "github.com/piplabs/story/contracts/bindings" + clog "github.com/piplabs/story/lib/log" +) + +// Keeper of the mint store. +type Keeper struct { + cdc codec.BinaryCodec + storeService storetypes.KVStoreService + stakingKeeper types.StakingKeeper + bankKeeper types.BankKeeper + feeCollectorName string + + inflationUpdateContract *bindings.IPTokenStaking // (rayden) TODO + + Schema collections.Schema + Params collections.Item[types.Params] +} + +// NewKeeper creates a new mint Keeper instance. +func NewKeeper( + cdc codec.BinaryCodec, + storeService storetypes.KVStoreService, + sk types.StakingKeeper, + ak types.AccountKeeper, + bk types.BankKeeper, + feeCollectorName string, +) Keeper { + // ensure mint module account is set + if addr := ak.GetModuleAddress(types.ModuleName); addr == nil { + panic(fmt.Sprintf("the x/%s module account has not been set", types.ModuleName)) + } + + sb := collections.NewSchemaBuilder(storeService) + k := Keeper{ + cdc: cdc, + storeService: storeService, + stakingKeeper: sk, + bankKeeper: bk, + feeCollectorName: feeCollectorName, + inflationUpdateContract: nil, + Params: collections.NewItem(sb, types.ParamsKey, "params", codec.CollValue[types.Params](cdc)), + } + + schema, err := sb.Build() + if err != nil { + panic(err) + } + k.Schema = schema + + return k +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx context.Context) log.Logger { + sdkCtx := sdk.UnwrapSDKContext(ctx) + return sdkCtx.Logger().With("module", "x/"+types.ModuleName) +} + +// StakingTokenSupply implements an alias call to the underlying staking keeper's +// StakingTokenSupply to be used in BeginBlocker. +func (k Keeper) StakingTokenSupply(ctx context.Context) (math.Int, error) { + return k.stakingKeeper.StakingTokenSupply(ctx) +} + +// BondedRatio implements an alias call to the underlying staking keeper's +// BondedRatio to be used in BeginBlocker. 
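A point that is easy to miss while reading the keeper: this same `Keeper` is what gets wired into `x/evmengine` as its new `MintKeeper` dependency, whose `ProcessInflationEvents` is called from the `ExecutionPayload` handler every block. A compile-time assertion of that relationship, shown here only for illustration (it is not part of the patch):

```go
// Illustrative only: the mint Keeper must satisfy the MintKeeper interface
// added to x/evmengine/types/expected_keepers.go in this change.
var _ evmenginetypes.MintKeeper = keeper.Keeper{}
```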
+func (k Keeper) BondedRatio(ctx context.Context) (math.LegacyDec, error) { + return k.stakingKeeper.BondedRatio(ctx) +} + +// MintCoins implements an alias call to the underlying supply keeper's +// MintCoins to be used in BeginBlocker. +func (k Keeper) MintCoins(ctx context.Context, newCoins sdk.Coins) error { + if newCoins.Empty() { + // skip as no coins need to be minted + return nil + } + + return k.bankKeeper.MintCoins(ctx, types.ModuleName, newCoins) +} + +// AddCollectedFees implements an alias call to the underlying supply keeper's +// AddCollectedFees to be used in BeginBlocker. +func (k Keeper) AddCollectedFees(ctx context.Context, fees sdk.Coins) error { + return k.bankKeeper.SendCoinsFromModuleToModule(ctx, types.ModuleName, k.feeCollectorName, fees) +} + +func (k Keeper) ProcessInflationEvents(ctx context.Context, height uint64, logs []*evmenginetypes.EVMEvent) error { + // (rayden) TODO + clog.Debug(ctx, "Processed inflation events", "height", height, "count", len(logs)) + + return nil +} diff --git a/client/x/mint/keeper/keeper_test.go b/client/x/mint/keeper/keeper_test.go new file mode 100644 index 00000000..c22fa287 --- /dev/null +++ b/client/x/mint/keeper/keeper_test.go @@ -0,0 +1,91 @@ +//nolint:paralleltest // just for testing +package keeper_test + +import ( + "testing" + + "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/suite" + + "github.com/piplabs/story/client/x/mint/keeper" + mintmodule "github.com/piplabs/story/client/x/mint/module" + minttestutil "github.com/piplabs/story/client/x/mint/testutil" + "github.com/piplabs/story/client/x/mint/types" +) + +type IntegrationTestSuite struct { + suite.Suite + + mintKeeper keeper.Keeper + ctx sdk.Context + stakingKeeper *minttestutil.MockStakingKeeper + bankKeeper *minttestutil.MockBankKeeper +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(IntegrationTestSuite)) +} + +func (s *IntegrationTestSuite) SetupTest() { + encCfg := moduletestutil.MakeTestEncodingConfig(mintmodule.AppModuleBasic{}) + key := storetypes.NewKVStoreKey(types.StoreKey) + storeService := runtime.NewKVStoreService(key) + testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test")) + s.ctx = testCtx.Ctx + + // gomock initializations + ctrl := gomock.NewController(s.T()) + accountKeeper := minttestutil.NewMockAccountKeeper(ctrl) + bankKeeper := minttestutil.NewMockBankKeeper(ctrl) + stakingKeeper := minttestutil.NewMockStakingKeeper(ctrl) + + accountKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(sdk.AccAddress{}) + + s.mintKeeper = keeper.NewKeeper( + encCfg.Codec, + storeService, + stakingKeeper, + accountKeeper, + bankKeeper, + authtypes.FeeCollectorName, + ) + s.stakingKeeper = stakingKeeper + s.bankKeeper = bankKeeper + + s.Require().Equal(testCtx.Ctx.Logger().With("module", "x/"+types.ModuleName), + s.mintKeeper.Logger(testCtx.Ctx)) + + err := s.mintKeeper.Params.Set(s.ctx, types.DefaultParams()) + s.Require().NoError(err) +} + +func (s *IntegrationTestSuite) TestAliasFunctions() { + stakingTokenSupply := math.NewIntFromUint64(100000000000) + s.stakingKeeper.EXPECT().StakingTokenSupply(s.ctx).Return(stakingTokenSupply, nil) + tokenSupply, err := 
s.mintKeeper.StakingTokenSupply(s.ctx) + s.Require().NoError(err) + s.Require().Equal(tokenSupply, stakingTokenSupply) + + bondedRatio := math.LegacyNewDecWithPrec(15, 2) + s.stakingKeeper.EXPECT().BondedRatio(s.ctx).Return(bondedRatio, nil) + ratio, err := s.mintKeeper.BondedRatio(s.ctx) + s.Require().NoError(err) + s.Require().Equal(ratio, bondedRatio) + + coins := sdk.NewCoins(sdk.NewCoin("stake", math.NewInt(1000000))) + s.bankKeeper.EXPECT().MintCoins(s.ctx, types.ModuleName, coins).Return(nil) + s.Require().NoError(s.mintKeeper.MintCoins(s.ctx, sdk.NewCoins())) + s.Require().NoError(s.mintKeeper.MintCoins(s.ctx, coins)) + + fees := sdk.NewCoins(sdk.NewCoin("stake", math.NewInt(1000))) + s.bankKeeper.EXPECT().SendCoinsFromModuleToModule(s.ctx, types.ModuleName, authtypes.FeeCollectorName, fees).Return(nil) + s.Require().NoError(s.mintKeeper.AddCollectedFees(s.ctx, fees)) +} diff --git a/client/x/mint/keeper/params.go b/client/x/mint/keeper/params.go new file mode 100644 index 00000000..fa615d34 --- /dev/null +++ b/client/x/mint/keeper/params.go @@ -0,0 +1,19 @@ +package keeper + +import ( + "context" + + "github.com/piplabs/story/client/x/mint/types" +) + +func (k Keeper) GetParams(ctx context.Context) (types.Params, error) { + return k.Params.Get(ctx) +} + +func (k Keeper) SetParams(ctx context.Context, value types.Params) error { + if err := value.Validate(); err != nil { + return err + } + + return k.Params.Set(ctx, value) +} diff --git a/client/x/mint/keeper/set_inflation_parameters.go b/client/x/mint/keeper/set_inflation_parameters.go new file mode 100644 index 00000000..70de29cb --- /dev/null +++ b/client/x/mint/keeper/set_inflation_parameters.go @@ -0,0 +1,14 @@ +package keeper + +import ( + "context" + + "github.com/piplabs/story/contracts/bindings" +) + +//nolint:revive // (rayden) TODO +func (k Keeper) ProcessSetInflationParameters(ctx context.Context, ev *bindings.IPTokenStakingDeposit) error { + // (rayden) TODO + + return nil +} diff --git a/client/x/mint/module/depinject.go b/client/x/mint/module/depinject.go new file mode 100644 index 00000000..c1ef3699 --- /dev/null +++ b/client/x/mint/module/depinject.go @@ -0,0 +1,72 @@ +//nolint:revive // just use interface{} +package module + +import ( + "cosmossdk.io/core/appmodule" + "cosmossdk.io/core/store" + "cosmossdk.io/depinject" + + "github.com/cosmos/cosmos-sdk/codec" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + + "github.com/piplabs/story/client/x/mint/keeper" + "github.com/piplabs/story/client/x/mint/types" +) + +var _ depinject.OnePerModuleType = AppModule{} + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (am AppModule) IsOnePerModuleType() {} + +// IsAppModule implements the appmodule.AppModule interface. 
+func (am AppModule) IsAppModule() {} + +//nolint:gochecknoinits // depinject +func init() { + appmodule.Register( + &Module{}, + appmodule.Provide(ProvideModule), + ) +} + +type ModuleInputs struct { + depinject.In + + ModuleKey depinject.OwnModuleKey + Config *Module + StoreService store.KVStoreService + Cdc codec.Codec + InflationCalculationFn types.InflationCalculationFn `optional:"true"` + + AccountKeeper types.AccountKeeper + BankKeeper types.BankKeeper + StakingKeeper types.StakingKeeper +} + +type ModuleOutputs struct { + depinject.Out + + MintKeeper keeper.Keeper + Module appmodule.AppModule +} + +func ProvideModule(in ModuleInputs) ModuleOutputs { + feeCollectorName := in.Config.GetFeeCollectorName() + if feeCollectorName == "" { + feeCollectorName = authtypes.FeeCollectorName + } + + k := keeper.NewKeeper( + in.Cdc, + in.StoreService, + in.StakingKeeper, + in.AccountKeeper, + in.BankKeeper, + feeCollectorName, + ) + + // when no inflation calculation function is provided it will use the default types.DefaultInflationCalculationFn + m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.InflationCalculationFn) + + return ModuleOutputs{MintKeeper: k, Module: m} +} diff --git a/client/x/mint/module/module.go b/client/x/mint/module/module.go new file mode 100644 index 00000000..9b1cff08 --- /dev/null +++ b/client/x/mint/module/module.go @@ -0,0 +1,136 @@ +//nolint:revive // keep method receiver untouched +package module + +import ( + "context" + "encoding/json" + "fmt" + + "cosmossdk.io/core/appmodule" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" + + "github.com/piplabs/story/client/x/mint/keeper" + "github.com/piplabs/story/client/x/mint/types" + "github.com/piplabs/story/lib/errors" +) + +// ConsensusVersion defines the current x/mint module consensus version. +const ConsensusVersion = 1 + +var ( + _ module.AppModuleBasic = AppModule{} + _ module.HasGenesis = AppModule{} + _ module.HasServices = AppModule{} + + _ appmodule.AppModule = AppModule{} + _ appmodule.HasBeginBlocker = AppModule{} +) + +// AppModuleBasic defines the basic application module used by the mint module. +type AppModuleBasic struct { + cdc codec.Codec +} + +// Name returns the mint module's name. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the mint module's types on the given LegacyAmino codec. +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers the module's interface types. +func (b AppModuleBasic) RegisterInterfaces(r cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(r) +} + +// DefaultGenesis returns default genesis state as raw bytes for the mint +// module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the mint module. 
+// + +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var data types.GenesisState + if err := cdc.UnmarshalJSON(bz, &data); err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to unmarshal %s genesis state", types.ModuleName)) + } + + return types.ValidateGenesis(data) +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the mint module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) {} + +// AppModule implements an application module for the mint module. +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + authKeeper types.AccountKeeper + + // inflationCalculator is used to calculate the inflation amount during BeginBlock. + // If inflationCalculator is nil, the default inflation calculation logic is used. + inflationCalculator types.InflationCalculationFn +} + +// NewAppModule creates a new AppModule object. If the InflationCalculationFn +// argument is nil, then the SDK's default inflation function will be used. +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + ak types.AccountKeeper, + ic types.InflationCalculationFn, +) AppModule { + if ic == nil { + ic = types.DefaultInflationCalculationFn + } + + return AppModule{ + AppModuleBasic: AppModuleBasic{cdc: cdc}, + keeper: keeper, + authKeeper: ak, + inflationCalculator: ic, + } +} + +// RegisterServices registers a gRPC query service to respond to the +// module-specific gRPC queries. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQuerier(am.keeper)) +} + +// InitGenesis performs genesis initialization for the mint module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + + am.keeper.InitGenesis(ctx, am.authKeeper, &genesisState) +} + +// ExportGenesis returns the exported genesis state as raw bytes for the mint +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return ConsensusVersion } + +// BeginBlock returns the begin blocker for the mint module. +func (am AppModule) BeginBlock(ctx context.Context) error { + return am.keeper.BeginBlocker(ctx, am.inflationCalculator) +} diff --git a/client/x/mint/module/module.proto b/client/x/mint/module/module.proto new file mode 100644 index 00000000..de949a56 --- /dev/null +++ b/client/x/mint/module/module.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package client.x.mint.module; + +import "cosmos/app/v1alpha1/module.proto"; + +option go_package = "client/x/mint/module"; + +// Module is the config object of the mint module. +message Module { + option (cosmos.app.v1alpha1.module) = { + go_import: "github.com/piplabs/story/client/x/mint" + }; + + string fee_collector_name = 1; +} \ No newline at end of file diff --git a/client/x/mint/module/module.pulsar.go b/client/x/mint/module/module.pulsar.go new file mode 100644 index 00000000..a8015ade --- /dev/null +++ b/client/x/mint/module/module.pulsar.go @@ -0,0 +1,576 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. 
+package module + +import ( + _ "cosmossdk.io/api/cosmos/app/v1alpha1" + fmt "fmt" + runtime "github.com/cosmos/cosmos-proto/runtime" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_Module protoreflect.MessageDescriptor + fd_Module_fee_collector_name protoreflect.FieldDescriptor +) + +func init() { + file_client_x_mint_module_module_proto_init() + md_Module = File_client_x_mint_module_module_proto.Messages().ByName("Module") + fd_Module_fee_collector_name = md_Module.Fields().ByName("fee_collector_name") +} + +var _ protoreflect.Message = (*fastReflection_Module)(nil) + +type fastReflection_Module Module + +func (x *Module) ProtoReflect() protoreflect.Message { + return (*fastReflection_Module)(x) +} + +func (x *Module) slowProtoReflect() protoreflect.Message { + mi := &file_client_x_mint_module_module_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_Module_messageType fastReflection_Module_messageType +var _ protoreflect.MessageType = fastReflection_Module_messageType{} + +type fastReflection_Module_messageType struct{} + +func (x fastReflection_Module_messageType) Zero() protoreflect.Message { + return (*fastReflection_Module)(nil) +} +func (x fastReflection_Module_messageType) New() protoreflect.Message { + return new(fastReflection_Module) +} +func (x fastReflection_Module_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_Module +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_Module) Descriptor() protoreflect.MessageDescriptor { + return md_Module +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_Module) Type() protoreflect.MessageType { + return _fastReflection_Module_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_Module) New() protoreflect.Message { + return new(fastReflection_Module) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_Module) Interface() protoreflect.ProtoMessage { + return (*Module)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_Module) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.FeeCollectorName != "" { + value := protoreflect.ValueOfString(x.FeeCollectorName) + if !f(fd_Module_fee_collector_name, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. 
Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_Module) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + return x.FeeCollectorName != "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + x.FeeCollectorName = "" + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_Module) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + value := x.FeeCollectorName + return protoreflect.ValueOfString(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + x.FeeCollectorName = value.Interface().(string) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. 
+// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + panic(fmt.Errorf("field fee_collector_name of message client.x.mint.module.Module is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "client.x.mint.module.Module.fee_collector_name": + return protoreflect.ValueOfString("") + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: client.x.mint.module.Module")) + } + panic(fmt.Errorf("message client.x.mint.module.Module does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_Module) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in client.x.mint.module.Module", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_Module) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_Module) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_Module) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. 
+// Consult the protoiface package documentation for details. +func (x *fastReflection_Module) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.FeeCollectorName) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.FeeCollectorName) > 0 { + i -= len(x.FeeCollectorName) + copy(dAtA[i:], x.FeeCollectorName) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.FeeCollectorName))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*Module) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Module: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: Module: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field FeeCollectorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.FeeCollectorName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: client/x/mint/module/module.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Module is the config object of the mint module. +type Module struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FeeCollectorName string `protobuf:"bytes,1,opt,name=fee_collector_name,json=feeCollectorName,proto3" json:"fee_collector_name,omitempty"` +} + +func (x *Module) Reset() { + *x = Module{} + if protoimpl.UnsafeEnabled { + mi := &file_client_x_mint_module_module_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Module) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Module) ProtoMessage() {} + +// Deprecated: Use Module.ProtoReflect.Descriptor instead. 
+func (*Module) Descriptor() ([]byte, []int) { + return file_client_x_mint_module_module_proto_rawDescGZIP(), []int{0} +} + +func (x *Module) GetFeeCollectorName() string { + if x != nil { + return x.FeeCollectorName + } + return "" +} + +var File_client_x_mint_module_module_proto protoreflect.FileDescriptor + +var file_client_x_mint_module_module_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x6d, 0x69, 0x6e, 0x74, 0x2f, + 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x78, 0x2e, 0x6d, 0x69, + 0x6e, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x1a, 0x20, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x66, 0x0a, 0x06, 0x4d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x66, 0x65, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x3a, 0x2e, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x28, 0x0a, 0x26, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, 0x70, 0x6c, 0x61, 0x62, 0x73, 0x2f, + 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x6d, + 0x69, 0x6e, 0x74, 0x42, 0xc2, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x78, 0x2e, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x25, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x78, 0x2f, 0x6d, 0x69, 0x6e, 0x74, 0x2f, + 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0xa2, 0x02, 0x04, 0x43, 0x58, 0x4d, 0x4d, 0xaa, 0x02, 0x14, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x58, 0x2e, 0x4d, 0x69, 0x6e, 0x74, 0x2e, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0xca, 0x02, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5c, 0x58, 0x5c, + 0x4d, 0x69, 0x6e, 0x74, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0xe2, 0x02, 0x20, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5c, 0x58, 0x5c, 0x4d, 0x69, 0x6e, 0x74, 0x5c, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x58, 0x3a, 0x3a, 0x4d, 0x69, 0x6e, 0x74, + 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_x_mint_module_module_proto_rawDescOnce sync.Once + file_client_x_mint_module_module_proto_rawDescData = file_client_x_mint_module_module_proto_rawDesc +) + +func file_client_x_mint_module_module_proto_rawDescGZIP() []byte { + file_client_x_mint_module_module_proto_rawDescOnce.Do(func() { + file_client_x_mint_module_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_x_mint_module_module_proto_rawDescData) + }) + return file_client_x_mint_module_module_proto_rawDescData +} + +var file_client_x_mint_module_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_client_x_mint_module_module_proto_goTypes = []interface{}{ + (*Module)(nil), // 0: client.x.mint.module.Module +} 
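The generated fast-reflection code above is not called directly; it backs the standard `protoreflect` API. A small illustrative snippet (same package, not part of the patch) showing the surface it serves:

```go
// Illustrative only: read fee_collector_name through protoreflect; the call
// path goes through the fastReflection_Module implementation above.
func exampleReflect() string {
	m := &Module{FeeCollectorName: "fee_collector"}
	fd := m.ProtoReflect().Descriptor().Fields().ByName("fee_collector_name")

	return m.ProtoReflect().Get(fd).String() // "fee_collector"
}
```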
+var file_client_x_mint_module_module_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_client_x_mint_module_module_proto_init() } +func file_client_x_mint_module_module_proto_init() { + if File_client_x_mint_module_module_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_x_mint_module_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Module); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_x_mint_module_module_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_x_mint_module_module_proto_goTypes, + DependencyIndexes: file_client_x_mint_module_module_proto_depIdxs, + MessageInfos: file_client_x_mint_module_module_proto_msgTypes, + }.Build() + File_client_x_mint_module_module_proto = out.File + file_client_x_mint_module_module_proto_rawDesc = nil + file_client_x_mint_module_module_proto_goTypes = nil + file_client_x_mint_module_module_proto_depIdxs = nil +} diff --git a/client/x/mint/testutil/expected_keepers_mocks.go b/client/x/mint/testutil/expected_keepers_mocks.go new file mode 100644 index 00000000..26be50dd --- /dev/null +++ b/client/x/mint/testutil/expected_keepers_mocks.go @@ -0,0 +1,195 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: x/mint/types/expected_keepers.go + +// Package testutil is a generated GoMock package. +package testutil + +import ( + context "context" + reflect "reflect" + + math "cosmossdk.io/math" + types "github.com/cosmos/cosmos-sdk/types" + gomock "github.com/golang/mock/gomock" +) + +// MockStakingKeeper is a mock of StakingKeeper interface. +type MockStakingKeeper struct { + ctrl *gomock.Controller + recorder *MockStakingKeeperMockRecorder +} + +// MockStakingKeeperMockRecorder is the mock recorder for MockStakingKeeper. +type MockStakingKeeperMockRecorder struct { + mock *MockStakingKeeper +} + +// NewMockStakingKeeper creates a new mock instance. +func NewMockStakingKeeper(ctrl *gomock.Controller) *MockStakingKeeper { + mock := &MockStakingKeeper{ctrl: ctrl} + mock.recorder = &MockStakingKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStakingKeeper) EXPECT() *MockStakingKeeperMockRecorder { + return m.recorder +} + +// BondedRatio mocks base method. +func (m *MockStakingKeeper) BondedRatio(ctx context.Context) (math.LegacyDec, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BondedRatio", ctx) + ret0, _ := ret[0].(math.LegacyDec) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BondedRatio indicates an expected call of BondedRatio. +func (mr *MockStakingKeeperMockRecorder) BondedRatio(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BondedRatio", reflect.TypeOf((*MockStakingKeeper)(nil).BondedRatio), ctx) +} + +// StakingTokenSupply mocks base method. 
+func (m *MockStakingKeeper) StakingTokenSupply(ctx context.Context) (math.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StakingTokenSupply", ctx) + ret0, _ := ret[0].(math.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StakingTokenSupply indicates an expected call of StakingTokenSupply. +func (mr *MockStakingKeeperMockRecorder) StakingTokenSupply(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StakingTokenSupply", reflect.TypeOf((*MockStakingKeeper)(nil).StakingTokenSupply), ctx) +} + +// MockAccountKeeper is a mock of AccountKeeper interface. +type MockAccountKeeper struct { + ctrl *gomock.Controller + recorder *MockAccountKeeperMockRecorder +} + +// MockAccountKeeperMockRecorder is the mock recorder for MockAccountKeeper. +type MockAccountKeeperMockRecorder struct { + mock *MockAccountKeeper +} + +// NewMockAccountKeeper creates a new mock instance. +func NewMockAccountKeeper(ctrl *gomock.Controller) *MockAccountKeeper { + mock := &MockAccountKeeper{ctrl: ctrl} + mock.recorder = &MockAccountKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccountKeeper) EXPECT() *MockAccountKeeperMockRecorder { + return m.recorder +} + +// GetModuleAccount mocks base method. +func (m *MockAccountKeeper) GetModuleAccount(ctx context.Context, moduleName string) types.ModuleAccountI { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetModuleAccount", ctx, moduleName) + ret0, _ := ret[0].(types.ModuleAccountI) + return ret0 +} + +// GetModuleAccount indicates an expected call of GetModuleAccount. +func (mr *MockAccountKeeperMockRecorder) GetModuleAccount(ctx, moduleName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAccount), ctx, moduleName) +} + +// GetModuleAddress mocks base method. +func (m *MockAccountKeeper) GetModuleAddress(name string) types.AccAddress { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetModuleAddress", name) + ret0, _ := ret[0].(types.AccAddress) + return ret0 +} + +// GetModuleAddress indicates an expected call of GetModuleAddress. +func (mr *MockAccountKeeperMockRecorder) GetModuleAddress(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAddress", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAddress), name) +} + +// SetModuleAccount mocks base method. +func (m *MockAccountKeeper) SetModuleAccount(arg0 context.Context, arg1 types.ModuleAccountI) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetModuleAccount", arg0, arg1) +} + +// SetModuleAccount indicates an expected call of SetModuleAccount. +func (mr *MockAccountKeeperMockRecorder) SetModuleAccount(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).SetModuleAccount), arg0, arg1) +} + +// MockBankKeeper is a mock of BankKeeper interface. +type MockBankKeeper struct { + ctrl *gomock.Controller + recorder *MockBankKeeperMockRecorder +} + +// MockBankKeeperMockRecorder is the mock recorder for MockBankKeeper. +type MockBankKeeperMockRecorder struct { + mock *MockBankKeeper +} + +// NewMockBankKeeper creates a new mock instance. 
+func NewMockBankKeeper(ctrl *gomock.Controller) *MockBankKeeper { + mock := &MockBankKeeper{ctrl: ctrl} + mock.recorder = &MockBankKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBankKeeper) EXPECT() *MockBankKeeperMockRecorder { + return m.recorder +} + +// MintCoins mocks base method. +func (m *MockBankKeeper) MintCoins(ctx context.Context, name string, amt types.Coins) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MintCoins", ctx, name, amt) + ret0, _ := ret[0].(error) + return ret0 +} + +// MintCoins indicates an expected call of MintCoins. +func (mr *MockBankKeeperMockRecorder) MintCoins(ctx, name, amt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MintCoins", reflect.TypeOf((*MockBankKeeper)(nil).MintCoins), ctx, name, amt) +} + +// SendCoinsFromModuleToAccount mocks base method. +func (m *MockBankKeeper) SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCoinsFromModuleToAccount", ctx, senderModule, recipientAddr, amt) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCoinsFromModuleToAccount indicates an expected call of SendCoinsFromModuleToAccount. +func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToAccount(ctx, senderModule, recipientAddr, amt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToAccount", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToAccount), ctx, senderModule, recipientAddr, amt) +} + +// SendCoinsFromModuleToModule mocks base method. +func (m *MockBankKeeper) SendCoinsFromModuleToModule(ctx context.Context, senderModule, recipientModule string, amt types.Coins) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCoinsFromModuleToModule", ctx, senderModule, recipientModule, amt) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCoinsFromModuleToModule indicates an expected call of SendCoinsFromModuleToModule. +func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToModule(ctx, senderModule, recipientModule, amt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToModule", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToModule), ctx, senderModule, recipientModule, amt) +} diff --git a/client/x/mint/types/codec.go b/client/x/mint/types/codec.go new file mode 100644 index 00000000..a3fdb433 --- /dev/null +++ b/client/x/mint/types/codec.go @@ -0,0 +1,19 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// RegisterLegacyAminoCodec registers concrete types on the LegacyAmino codec. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(Params{}, "client/x/mint/Params", nil) +} + +// RegisterInterfaces registers the interfaces types with the interface registry. +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + ) +} diff --git a/client/x/mint/types/events.go b/client/x/mint/types/events.go new file mode 100644 index 00000000..053d12a3 --- /dev/null +++ b/client/x/mint/types/events.go @@ -0,0 +1,6 @@ +package types + +// Minting module event types. 
+const ( + EventTypeMint = ModuleName +) diff --git a/client/x/mint/types/expected_keepers.go b/client/x/mint/types/expected_keepers.go new file mode 100644 index 00000000..dcdfda77 --- /dev/null +++ b/client/x/mint/types/expected_keepers.go @@ -0,0 +1,32 @@ +package types // noalias + +import ( + "context" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// StakingKeeper defines the expected staking keeper. +type StakingKeeper interface { + StakingTokenSupply(ctx context.Context) (math.Int, error) + BondedRatio(ctx context.Context) (math.LegacyDec, error) +} + +// AccountKeeper defines the contract required for account APIs. +type AccountKeeper interface { + GetModuleAddress(name string) sdk.AccAddress + + // TODO remove with genesis 2-phases refactor https://github.com/cosmos/cosmos-sdk/issues/2862 + SetModuleAccount(ctx context.Context, moduleAccount sdk.ModuleAccountI) + GetModuleAccount(ctx context.Context, moduleName string) sdk.ModuleAccountI +} + +// BankKeeper defines the contract needed to be fulfilled for banking and supply +// dependencies. +type BankKeeper interface { + SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToModule(ctx context.Context, senderModule, recipientModule string, amt sdk.Coins) error + MintCoins(ctx context.Context, name string, amt sdk.Coins) error +} diff --git a/client/x/mint/types/genesis.go b/client/x/mint/types/genesis.go new file mode 100644 index 00000000..ff80226e --- /dev/null +++ b/client/x/mint/types/genesis.go @@ -0,0 +1,39 @@ +package types + +import ( + "context" + + "cosmossdk.io/math" +) + +// InflationCalculationFn defines the function required to calculate inflation amount during +// BeginBlock. It receives the params stored in the keeper, along with the current +// bondedRatio and returns the newly calculated inflation amount. +// It can be used to specify a custom inflation calculation logic, instead of relying on the +// default logic provided by the sdk. +type InflationCalculationFn func(ctx context.Context, params Params, bondedRatio math.LegacyDec) math.LegacyDec + +// DefaultInflationCalculationFn is the default function used to calculate inflation. +func DefaultInflationCalculationFn(_ context.Context, params Params, _ math.LegacyDec) math.LegacyDec { + return math.LegacyNewDec(int64(params.InflationsPerYear)).Quo(math.LegacyNewDec(int64(params.BlocksPerYear))) +} + +// NewGenesisState creates a new GenesisState object. +func NewGenesisState(params Params) *GenesisState { + return &GenesisState{ + Params: params, + } +} + +// DefaultGenesisState creates a default GenesisState object. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// ValidateGenesis validates the provided genesis state to ensure the +// expected invariants holds. +func ValidateGenesis(data GenesisState) error { + return data.Params.Validate() +} diff --git a/client/x/mint/types/genesis.pb.go b/client/x/mint/types/genesis.pb.go new file mode 100644 index 00000000..9f7e83c8 --- /dev/null +++ b/client/x/mint/types/genesis.pb.go @@ -0,0 +1,322 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: client/x/mint/types/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the mint module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_f5cd7edb2fa50db9, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "client.x.mint.types.GenesisState") +} + +func init() { proto.RegisterFile("client/x/mint/types/genesis.proto", fileDescriptor_f5cd7edb2fa50db9) } + +var fileDescriptor_f5cd7edb2fa50db9 = []byte{ + // 192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xce, 0xc9, 0x4c, + 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0xcf, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, + 0x28, 0xd1, 0xab, 0xd0, 0x03, 0x29, 0xd1, 0x03, 0x2b, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, + 0xcb, 0xeb, 0x83, 0x58, 0x10, 0xa5, 0x52, 0x72, 0xd8, 0x4c, 0x03, 0xeb, 0x82, 0xc8, 0x0b, 0x26, + 0xe6, 0x66, 0xe6, 0xe5, 0xeb, 0x83, 0x49, 0x88, 0x90, 0x92, 0x1f, 0x17, 0x8f, 0x3b, 0xc4, 0xba, + 0xe0, 0x92, 0xc4, 0x92, 0x54, 0x21, 0x3b, 0x2e, 0xb6, 0x82, 0xc4, 0xa2, 0xc4, 0xdc, 0x62, 0x09, + 0x46, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x69, 0x3d, 0x2c, 0xd6, 0xeb, 0x05, 0x80, 0x95, 0x38, 0x71, + 0x9e, 0xb8, 0x27, 0xcf, 0xb0, 0xe2, 0xf9, 0x06, 0x2d, 0xc6, 0x20, 0xa8, 0x2e, 0x27, 0xdd, 0x13, + 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, + 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x12, 0xc6, 0xe2, 0xb8, 0x24, 0x36, 0xb0, + 0x2b, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xaf, 0x59, 0x3b, 0x08, 0x01, 0x00, 0x00, +} + 
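// A minimal standalone sketch (not repository code): DefaultInflationCalculationFn
// in client/x/mint/types/genesis.go simply divides InflationsPerYear by
// BlocksPerYear. The program below reproduces that arithmetic with the module's
// default parameters; the helper name and exact figures are illustrative
// assumptions, not part of this patch.
package main

import (
	"fmt"

	"cosmossdk.io/math"
)

// inflationPerBlock mirrors the default calculation: yearly inflation spread
// evenly over the expected number of blocks per year, returned as a LegacyDec.
func inflationPerBlock(inflationsPerYear, blocksPerYear uint64) math.LegacyDec {
	return math.LegacyNewDec(int64(inflationsPerYear)).
		Quo(math.LegacyNewDec(int64(blocksPerYear)))
}

func main() {
	// Defaults from params.go and the local genesis: 24_625_000 * 10^9 base
	// units per year over 6_311_520 blocks (~5s block time), i.e. roughly
	// 3.9e9 base units minted per block.
	fmt.Println(inflationPerBlock(24_625_000_000_000_000, 6_311_520))
}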
+func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } 
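// Wire type 0 is a varint: skipping the field consumes bytes until one with
// the 0x80 continuation bit clear is reached, and the two checks above reject
// varints longer than 64 bits and buffers that end mid-varint.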
+ iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/mint/types/genesis.proto b/client/x/mint/types/genesis.proto new file mode 100644 index 00000000..e9d0af6e --- /dev/null +++ b/client/x/mint/types/genesis.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package client.x.mint.types; + +import "gogoproto/gogo.proto"; +import "client/x/mint/types/mint.proto"; +import "amino/amino.proto"; + +option go_package = "client/x/mint/types"; + +// GenesisState defines the mint module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true]; +} diff --git a/client/x/mint/types/inflation_contract.go b/client/x/mint/types/inflation_contract.go new file mode 100644 index 00000000..113232c3 --- /dev/null +++ b/client/x/mint/types/inflation_contract.go @@ -0,0 +1,36 @@ +package types + +import ( + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + + "github.com/piplabs/story/contracts/bindings" +) + +// (rayden) TODO. +var ( + inflationUpdateABI = mustGetABI(bindings.IPTokenStakingMetaData) + SetInflationParameters = mustGetEvent(inflationUpdateABI, "Deposit") +) + +// mustGetABI returns the metadata's ABI as an abi.ABI type. +// It panics on error. +func mustGetABI(metadata *bind.MetaData) *abi.ABI { + abi, err := metadata.GetAbi() + if err != nil { + panic(err) + } + + return abi +} + +// mustGetEvent returns the event with the given name from the ABI. +// It panics if the event is not found. +func mustGetEvent(abi *abi.ABI, name string) abi.Event { + event, ok := abi.Events[name] + if !ok { + panic("event not found") + } + + return event +} diff --git a/client/x/mint/types/keys.go b/client/x/mint/types/keys.go new file mode 100644 index 00000000..cfa4cfd5 --- /dev/null +++ b/client/x/mint/types/keys.go @@ -0,0 +1,17 @@ +package types + +import "cosmossdk.io/collections" + +var ( + // MinterKey is the key to use for the keeper store. + MinterKey = collections.NewPrefix(0) + ParamsKey = collections.NewPrefix(1) +) + +const ( + // module name. + ModuleName = "mint" + + // StoreKey is the default store key for mint. + StoreKey = ModuleName +) diff --git a/client/x/mint/types/mint.pb.go b/client/x/mint/types/mint.pb.go new file mode 100644 index 00000000..59f0e424 --- /dev/null +++ b/client/x/mint/types/mint.pb.go @@ -0,0 +1,393 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: client/x/mint/types/mint.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the x/mint module. +type Params struct { + // type of coin to mint + MintDenom string `protobuf:"bytes,1,opt,name=mint_denom,json=mintDenom,proto3" json:"mint_denom,omitempty"` + // inflation amount per year + InflationsPerYear uint64 `protobuf:"varint,2,opt,name=inflations_per_year,json=inflationsPerYear,proto3" json:"inflations_per_year,omitempty"` + // expected blocks per year + BlocksPerYear uint64 `protobuf:"varint,3,opt,name=blocks_per_year,json=blocksPerYear,proto3" json:"blocks_per_year,omitempty"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_9c6e60aec58f52af, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetMintDenom() string { + if m != nil { + return m.MintDenom + } + return "" +} + +func (m *Params) GetInflationsPerYear() uint64 { + if m != nil { + return m.InflationsPerYear + } + return 0 +} + +func (m *Params) GetBlocksPerYear() uint64 { + if m != nil { + return m.BlocksPerYear + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "client.x.mint.types.Params") +} + +func init() { proto.RegisterFile("client/x/mint/types/mint.proto", fileDescriptor_9c6e60aec58f52af) } + +var fileDescriptor_9c6e60aec58f52af = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xce, 0xc9, 0x4c, + 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0xcf, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x06, + 0x33, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x84, 0x21, 0xf2, 0x7a, 0x15, 0x7a, 0x60, 0x41, + 0xb0, 0xbc, 0x94, 0x60, 0x62, 0x6e, 0x66, 0x5e, 0xbe, 0x3e, 0x98, 0x84, 0xa8, 0x53, 0x9a, 0xc5, + 0xc8, 0xc5, 0x16, 0x90, 0x58, 0x94, 0x98, 0x5b, 0x2c, 0x24, 0xcb, 0xc5, 0x05, 0x52, 0x1b, 0x9f, + 0x92, 0x9a, 0x97, 0x9f, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0xc4, 0x09, 0x12, 0x71, 0x01, + 0x09, 0x08, 0xe9, 0x71, 0x09, 0x67, 0xe6, 0xa5, 0xe5, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0xc7, + 0x17, 0xa4, 0x16, 0xc5, 0x57, 0xa6, 0x26, 0x16, 0x49, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x04, 
0x09, + 0x22, 0xa4, 0x02, 0x52, 0x8b, 0x22, 0x53, 0x13, 0x8b, 0x84, 0xd4, 0xb8, 0xf8, 0x93, 0x72, 0xf2, + 0x93, 0xb3, 0x91, 0xd4, 0x32, 0x83, 0xd5, 0xf2, 0x42, 0x84, 0xa1, 0xea, 0xac, 0x24, 0xbb, 0x9e, + 0x6f, 0xd0, 0x12, 0x41, 0xf5, 0x0e, 0xc4, 0x45, 0x4e, 0xba, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, + 0x78, 0x2c, 0xc7, 0x10, 0x25, 0x8c, 0xc5, 0xfb, 0x49, 0x6c, 0x60, 0x2f, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xd9, 0x4d, 0x19, 0x4f, 0x1c, 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlocksPerYear != 0 { + i = encodeVarintMint(dAtA, i, uint64(m.BlocksPerYear)) + i-- + dAtA[i] = 0x18 + } + if m.InflationsPerYear != 0 { + i = encodeVarintMint(dAtA, i, uint64(m.InflationsPerYear)) + i-- + dAtA[i] = 0x10 + } + if len(m.MintDenom) > 0 { + i -= len(m.MintDenom) + copy(dAtA[i:], m.MintDenom) + i = encodeVarintMint(dAtA, i, uint64(len(m.MintDenom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMint(dAtA []byte, offset int, v uint64) int { + offset -= sovMint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MintDenom) + if l > 0 { + n += 1 + l + sovMint(uint64(l)) + } + if m.InflationsPerYear != 0 { + n += 1 + sovMint(uint64(m.InflationsPerYear)) + } + if m.BlocksPerYear != 0 { + n += 1 + sovMint(uint64(m.BlocksPerYear)) + } + return n +} + +func sovMint(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMint(x uint64) (n int) { + return sovMint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MintDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MintDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InflationsPerYear", wireType) + } + m.InflationsPerYear = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InflationsPerYear |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlocksPerYear", wireType) + } + m.BlocksPerYear = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlocksPerYear |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMint + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMint = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/mint/types/mint.proto b/client/x/mint/types/mint.proto new file mode 100644 index 00000000..509ccd8f --- /dev/null +++ b/client/x/mint/types/mint.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +package client.x.mint.types; + +option go_package = "client/x/mint/types"; + +import "amino/amino.proto"; + +// Params defines the parameters for the x/mint module. 
+message Params { + option (amino.name) = "client/x/mint/Params"; + + // type of coin to mint + string mint_denom = 1; + // inflation amount per year + uint64 inflations_per_year = 2; + // expected blocks per year + uint64 blocks_per_year = 3; +} diff --git a/client/x/mint/types/params.go b/client/x/mint/types/params.go new file mode 100644 index 00000000..cc5197a3 --- /dev/null +++ b/client/x/mint/types/params.go @@ -0,0 +1,92 @@ +//nolint:revive // just use interface{} +package types + +import ( + "fmt" + "strings" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/piplabs/story/lib/errors" +) + +// NewParams returns Params instance with the given values. +func NewParams(mintDenom string, inflationsPerYear uint64, blocksPerYear uint64) Params { + return Params{ + MintDenom: mintDenom, + InflationsPerYear: inflationsPerYear, + BlocksPerYear: blocksPerYear, + } +} + +// DefaultParams returns default x/mint module parameters. +func DefaultParams() Params { + return Params{ + MintDenom: sdk.DefaultBondDenom, + InflationsPerYear: 24625000_000_000_000, + BlocksPerYear: uint64(60 * 60 * 8766 / 5), // assuming 5 second block times + } +} + +// Validate does the sanity check on the params. +func (p Params) Validate() error { + if err := validateMintDenom(p.MintDenom); err != nil { + return err + } + if err := validateInflationsPerYear(p.InflationsPerYear); err != nil { + return err + } + if err := validateBlocksPerYear(p.BlocksPerYear); err != nil { + return err + } + + return nil +} + +func validateMintDenom(i interface{}) error { + v, ok := i.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if strings.TrimSpace(v) == "" { + return errors.New("mint denom cannot be blank") + } + + if err := sdk.ValidateDenom(v); err != nil { + return errors.Wrap(err, "mint denom is invalid") + } + + return nil +} + +func validateInflationsPerYear(i interface{}) error { + v, ok := i.(math.LegacyDec) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNil() { + return fmt.Errorf("inflations per year cannot be nil: %s", v) + } + if v.IsNegative() { + return fmt.Errorf("inflations per year cannot be negative: %s", v) + } + + return nil +} + +func validateBlocksPerYear(i interface{}) error { + v, ok := i.(uint64) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v == 0 { + return fmt.Errorf("blocks per year must be positive: %d", v) + } + + return nil +} diff --git a/client/x/mint/types/query.pb.go b/client/x/mint/types/query.pb.go new file mode 100644 index 00000000..89738614 --- /dev/null +++ b/client/x/mint/types/query.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: client/x/mint/types/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a6d4efab0120ffa4, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a6d4efab0120ffa4, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "client.x.mint.types.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "client.x.mint.types.QueryParamsResponse") +} + +func init() { proto.RegisterFile("client/x/mint/types/query.proto", fileDescriptor_a6d4efab0120ffa4) } + +var fileDescriptor_a6d4efab0120ffa4 = []byte{ + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xce, 0xc9, 0x4c, + 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0xcf, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, + 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86, 0x28, 0xd0, + 0xab, 0xd0, 0x03, 
0x29, 0xd0, 0x03, 0x2b, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, + 0x83, 0x58, 0x10, 0xa5, 0x52, 0x32, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x89, 0x05, 0x99, + 0xfa, 0x89, 0x79, 0x79, 0xf9, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xc5, 0x50, 0x59, 0x39, 0x6c, + 0x36, 0x81, 0xcd, 0x84, 0xc8, 0x0b, 0x26, 0xe6, 0x66, 0xe6, 0xe5, 0xeb, 0x83, 0x49, 0x88, 0x90, + 0x92, 0x08, 0x97, 0x50, 0x20, 0xc8, 0x29, 0x01, 0x89, 0x45, 0x89, 0xb9, 0xc5, 0x41, 0xa9, 0x85, + 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xa1, 0x5c, 0xc2, 0x28, 0xa2, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, + 0x42, 0x76, 0x5c, 0x6c, 0x05, 0x60, 0x11, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x69, 0x3d, + 0x2c, 0x2e, 0xd7, 0x83, 0x68, 0x72, 0xe2, 0x3c, 0x71, 0x4f, 0x9e, 0x61, 0xc5, 0xf3, 0x0d, 0x5a, + 0x8c, 0x41, 0x50, 0x5d, 0x46, 0x2d, 0x8c, 0x5c, 0xac, 0x60, 0x73, 0x85, 0xaa, 0xb9, 0xd8, 0x20, + 0xca, 0x84, 0xd4, 0xb1, 0x9a, 0x81, 0xe9, 0x26, 0x29, 0x0d, 0xc2, 0x0a, 0x21, 0xce, 0x54, 0x92, + 0x6b, 0xba, 0xfc, 0x64, 0x32, 0x93, 0x84, 0x90, 0x98, 0x3e, 0x34, 0x3c, 0xc0, 0xa1, 0x51, 0x66, + 0xa8, 0x0f, 0x71, 0x86, 0x93, 0xee, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, + 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, + 0x09, 0x63, 0x09, 0xc0, 0x24, 0x36, 0x70, 0x48, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x33, + 0x4d, 0xc0, 0x75, 0xc8, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Params returns the total set of minting parameters. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/client.x.mint.types.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Params returns the total set of minting parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/client.x.mint.types.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "client.x.mint.types.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "client/x/mint/types/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + 
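// Protobuf defines only wire types 0 through 5, all handled by the cases
// above, so the remaining values (6 and 7) reach this default and are
// rejected as malformed input.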
return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/client/x/mint/types/query.proto b/client/x/mint/types/query.proto new file mode 100644 index 00000000..c71b49f3 --- /dev/null +++ b/client/x/mint/types/query.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; +package client.x.mint.types; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "client/x/mint/types/mint.proto"; +import "amino/amino.proto"; + +option go_package = "client/x/mint/types"; + +// Query provides defines the gRPC querier service. +service Query { + // Params returns the total set of minting parameters. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/client/mint/v1/params"; + } +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true]; +} diff --git a/go.mod b/go.mod index 3c36fe7a..6cc516df 100644 --- a/go.mod +++ b/go.mod @@ -129,7 +129,7 @@ require ( github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect From 6dd797e62d3c47708f01a708074e96f36914e860 Mon Sep 17 00:00:00 2001 From: Zerui Ge Date: Tue, 8 Oct 2024 23:02:52 -0700 Subject: [PATCH 25/29] feat(netconf): fix genesis config of mint module in local netconf (#175) --- lib/netconf/local/genesis.json | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/lib/netconf/local/genesis.json b/lib/netconf/local/genesis.json index bd2bbf5e..1f436b8b 100644 --- a/lib/netconf/local/genesis.json +++ b/lib/netconf/local/genesis.json @@ -179,16 +179,9 @@ } }, "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, "params": { "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", + "inflations_per_year": "24625000000000000.000000000000000000", "blocks_per_year": "6311520" } }, From 2459fb15271bf258b00eccb40d9c05c36db5e374 Mon Sep 17 00:00:00 2001 From: Ramarti Date: Wed, 9 Oct 2024 12:35:17 -0300 Subject: [PATCH 26/29] feat(contracts): transparent proxies and el genesis (#165) * Change UUPS by TransparentUpgradeableProxy, use etching to generate initial contracts * etch all the contracts * git ignore local dumps * fix contracts/script/EtchInitialState.s.sol, use it also as setup for tests * fix GenerateAlloc for UpgradeEntryPoint, add allocations and test upgradeability * lint and cl fixes * temporarily disabled solhint in CI/CD * temporarily remove run_solidity_lint * temporarily remove 
lint form workflow * remove need for env function in test --- .github/workflows/ci-foundry.yml | 7 +- .gitignore | 3 + .pre-commit/run_solhint.sh | 13 +- .pre-commit/run_solidity_lint.sh | 13 +- contracts/script/DeployCore.s.sol | 62 ----- contracts/script/GenerateAlloc.s.sol | 243 ++++++++++++++++++ contracts/script/TestPrecompileUpgrades.s.sol | 128 +++++++++ contracts/src/libraries/Predeploys.sol | 6 +- contracts/src/protocol/IPTokenSlashing.sol | 8 +- contracts/src/protocol/IPTokenStaking.sol | 8 +- contracts/src/protocol/UpgradeEntrypoint.sol | 8 +- contracts/test/script/DeployCore.t.sol | 48 ++-- contracts/test/stake/IPTokenSlashing.t.sol | 5 - contracts/test/stake/IPTokenStaking.t.sol | 4 +- .../test/upgrade/UpgradeEntryPoint.t.sol | 7 +- contracts/test/utils/Test.sol | 52 +--- 16 files changed, 438 insertions(+), 177 deletions(-) delete mode 100644 contracts/script/DeployCore.s.sol create mode 100644 contracts/script/GenerateAlloc.s.sol create mode 100644 contracts/script/TestPrecompileUpgrades.s.sol diff --git a/.github/workflows/ci-foundry.yml b/.github/workflows/ci-foundry.yml index 12faace7..926d7a8d 100644 --- a/.github/workflows/ci-foundry.yml +++ b/.github/workflows/ci-foundry.yml @@ -39,9 +39,10 @@ jobs: working-directory: contracts # Run lint - - name: Check lint - run: pnpm lint-check - working-directory: contracts + # TODO: Fix and unify linting + # - name: Check lint + # run: pnpm lint-check + # working-directory: contracts # first, build contracts excluding the tests and scripts. Check contract sizes in this step. - name: Run Contract Size check diff --git a/.gitignore b/.gitignore index c705c479..d63c9fb6 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,6 @@ dist # Environment vars .env + +# Local Alloc file +local-alloc.json diff --git a/.pre-commit/run_solhint.sh b/.pre-commit/run_solhint.sh index 8fbc1924..b1039d39 100755 --- a/.pre-commit/run_solhint.sh +++ b/.pre-commit/run_solhint.sh @@ -2,11 +2,12 @@ # Solhint's repo doesn't support pre-commit out-of-the-box, so this script is the workaround. -VERSION="4.0.0" +# TODO: Unify and fix solhint versions in repo +# VERSION="5.0.3" -if ! which solhint 1>/dev/null || [[ $(solhint --version) != "$VERSION" ]]; then - echo "Installing solhint@$VERSION" - npm install -g solhint@$VERSION -fi +# if ! which solhint 1>/dev/null || [[ $(solhint --version) != "$VERSION" ]]; then +# echo "Installing solhint@$VERSION" +# npm install -g solhint@$VERSION +# fi -solhint $@ +# solhint $@ diff --git a/.pre-commit/run_solidity_lint.sh b/.pre-commit/run_solidity_lint.sh index b83d0570..0eb5d573 100755 --- a/.pre-commit/run_solidity_lint.sh +++ b/.pre-commit/run_solidity_lint.sh @@ -3,12 +3,13 @@ # Runs `pnpm lint-check` for every unique foundry project derived from the list # of files provided as arguments by pre-commit. 
-source scripts/install_foundry.sh +# TODO: Unify and fix solhint versions in repo +# source scripts/install_foundry.sh # import foundryroots -source .pre-commit/foundry_utils.sh +# source .pre-commit/foundry_utils.sh -for dir in $(foundryroots $@); do - echo "Running 'lint-check' in ./$dir" - (cd $dir && pnpm lint-check) -done +# for dir in $(foundryroots $@); do +# echo "Running 'lint-check' in ./$dir" +# (cd $dir && pnpm lint-check) +# done diff --git a/contracts/script/DeployCore.s.sol b/contracts/script/DeployCore.s.sol deleted file mode 100644 index 59aa012a..00000000 --- a/contracts/script/DeployCore.s.sol +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: BUSL-1.1 -pragma solidity ^0.8.23; -/* solhint-disable no-console */ -/* solhint-disable max-line-length */ - -import { Script } from "forge-std/Script.sol"; -import { console2 } from "forge-std/console2.sol"; -import { ERC1967Proxy } from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; - -import { IPTokenStaking } from "../src/protocol/IPTokenStaking.sol"; -import { IPTokenSlashing } from "../src/protocol/IPTokenSlashing.sol"; -import { UpgradeEntrypoint } from "../src/protocol/UpgradeEntrypoint.sol"; - -/** - * @title DeployCore - * @dev A script + utilities to deploy the core contracts - */ -contract DeployCore is Script { - function run() public { - // TODO: read env - address protocolAccessManagerAddr = address(0xf398C12A45Bc409b6C652E25bb0a3e702492A4ab); - require(protocolAccessManagerAddr != address(0), "address not set"); - - uint256 deployerKey = vm.envUint("IPTOKENSTAKING_DEPLOYER_KEY"); - - vm.startBroadcast(deployerKey); - - address impl = address( - new IPTokenStaking( - 1 gwei, // stakingRounding - 1000, // defaultCommissionRate, 10% - 5000, // defaultMaxCommissionRate, 50% - 500 // defaultMaxCommissionChangeRate, 5% - ) - ); - IPTokenStaking ipTokenStaking = IPTokenStaking(address(new ERC1967Proxy(impl, ""))); - ipTokenStaking.initialize( - protocolAccessManagerAddr, - 1 ether, // minStakeAmount - 1 ether, // minUnstakeAmount - 1 ether, // minRedelegateAmount - 7 days // withdrawalAddressInterval - ); - - impl = address(new IPTokenSlashing(address(ipTokenStaking))); - IPTokenSlashing ipTokenSlashing = IPTokenSlashing(address(new ERC1967Proxy(impl, ""))); - ipTokenSlashing.initialize( - protocolAccessManagerAddr, - 1 ether // unjailFee - ); - - impl = address(new UpgradeEntrypoint()); - UpgradeEntrypoint upgradeEntrypoint = UpgradeEntrypoint(address(new ERC1967Proxy(impl, ""))); - upgradeEntrypoint.initialize(protocolAccessManagerAddr); - - vm.stopBroadcast(); - - console2.log("IPTokenStaking deployed at:", address(ipTokenStaking)); - console2.log("IPTokenSlashing deployed at:", address(ipTokenSlashing)); - console2.log("UpgradeEntrypoint deployed at:", address(upgradeEntrypoint)); - } -} diff --git a/contracts/script/GenerateAlloc.s.sol b/contracts/script/GenerateAlloc.s.sol new file mode 100644 index 00000000..95a1ebf1 --- /dev/null +++ b/contracts/script/GenerateAlloc.s.sol @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.23; +/* solhint-disable no-console */ +/* solhint-disable max-line-length */ + +import { Script } from "forge-std/Script.sol"; +import { console2 } from "forge-std/console2.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; + +import { IPTokenStaking } from "../src/protocol/IPTokenStaking.sol"; +import { IPTokenSlashing } from "../src/protocol/IPTokenSlashing.sol"; +import 
{ UpgradeEntrypoint } from "../src/protocol/UpgradeEntrypoint.sol"; + +import { EIP1967Helper } from "./utils/EIP1967Helper.sol"; +import { InitializableHelper } from "./utils/InitializableHelper.sol"; +import { Predeploys } from "../src/libraries/Predeploys.sol"; + +/** + * @title GenerateAlloc + * @dev A script to generate the alloc section of EL genesis + * - Predeploys (See src/libraries/Predeploys.sol) + * - Genesis $IP allocations (chain id dependent) + * Run it by + * forge script script/GenerateAlloc.s.sol -vvvv --chain-id + * Then, replace the contents of alloc field in EL genesis.json for the contents + * of the generated json before starting the network. + * This contract is also used by forge tests, to unify the process. + */ +contract GenerateAlloc is Script { + /** + * @notice Predeploy deployer address, used for each `new` call in this script + */ + address internal deployer = 0xDDdDddDdDdddDDddDDddDDDDdDdDDdDDdDDDDDDd; + + // Upgrade admin controls upgradeability (by being Owner of each ProxyAdmin), + // protocol admin is Owner of precompiles (admin/governance methods). + // To disable upgradeability, we transfer ProxyAdmin ownership to a dead address + address internal upgradeAdmin; + address internal protocolAdmin; + string internal dumpPath = getDumpPath(); + bool public saveState = true; + uint256 public constant MAINNET_CHAIN_ID = 0; // TBD + + /// @notice call from Test.sol to run test fast (no json saving) + function disableStateDump() external { + require(block.chainid == 31337, "Only for local tests"); + saveState = false; + } + + /// @notice call from Test.sol only + function setAdminAddresses(address upgrade, address protocol) external { + require(block.chainid == 31337, "Only for local tests"); + upgradeAdmin = upgrade; + protocolAdmin = protocol; + } + + /// @notice path where alloc file will be stored + function getDumpPath() internal view returns (string memory) { + if (block.chainid == 1513) { + return "./iliad-alloc.json"; + } else if (block.chainid == 1512) { + return "./mininet-alloc.json"; + } else if (block.chainid == 31337) { + return "./local-alloc.json"; + } else { + revert("Unsupported chain id"); + } + } + + /// @notice main script method + function run() public { + if (upgradeAdmin == address(0)) { + upgradeAdmin = vm.envAddress("UPGRADE_ADMIN_ADDRESS"); + } + require(upgradeAdmin != address(0), "upgradeAdmin not set"); + + if (protocolAdmin == address(0)) { + protocolAdmin = vm.envAddress("ADMIN_ADDRESS"); + } + require(protocolAdmin != address(0), "protocolAdmin not set"); + + vm.startPrank(deployer); + + setPredeploys(); + setAllocations(); + + // Reset so its not included state dump + vm.etch(msg.sender, ""); + vm.resetNonce(msg.sender); + vm.deal(msg.sender, 0); + + vm.etch(deployer, ""); + // Not resetting nonce + vm.deal(deployer, 0); + + vm.stopPrank(); + if (saveState) { + vm.dumpState(dumpPath); + console2.log("Alloc saved to:", dumpPath); + } + } + + function setPredeploys() internal { + setProxy(Predeploys.Staking); + setProxy(Predeploys.Slashing); + setProxy(Predeploys.Upgrades); + + setStaking(); + setSlashing(); + setUpgrade(); + } + + function setProxy(address proxyAddr) internal { + address impl = Predeploys.getImplAddress(proxyAddr); + + // set impl code to non-zero length, so it passes TransparentUpgradeableProxy constructor check + // assert it is not already set + require(impl.code.length == 0, "impl already set"); + vm.etch(impl, "00"); + + // use new, so that the immutable variable the holds the ProxyAdmin proxyAddr is set 
in properly in bytecode + address tmp = address(new TransparentUpgradeableProxy(impl, upgradeAdmin, "")); + vm.etch(proxyAddr, tmp.code); + + // set implempentation storage manually + EIP1967Helper.setImplementation(proxyAddr, impl); + + // set admin storage, to follow EIP1967 standard + EIP1967Helper.setAdmin(proxyAddr, EIP1967Helper.getAdmin(tmp)); + + // reset impl & tmp + vm.etch(impl, ""); + vm.etch(tmp, ""); + + // can we reset nonce here? we are using "deployer" proxyAddr + vm.resetNonce(tmp); + vm.deal(impl, 1); + vm.deal(proxyAddr, 1); + } + + /** + * @notice Setup Staking predeploy + */ + function setStaking() internal { + address impl = Predeploys.getImplAddress(Predeploys.Staking); + + address tmp = address(new IPTokenStaking( + 1 gwei, // stakingRounding + 1000, // defaultCommissionRate, 10% + 5000, // defaultMaxCommissionRate, 50% + 500 // defaultMaxCommissionChangeRate, 5% + )); + console2.log("tpm", tmp); + vm.etch(impl, tmp.code); + + // reset tmp + vm.etch(tmp, ""); + vm.store(tmp, 0, "0x"); + vm.resetNonce(tmp); + + InitializableHelper.disableInitializers(impl); + IPTokenStaking(Predeploys.Staking).initialize(protocolAdmin, 1 ether, 1 ether, 1 ether, 7 days); + + console2.log("IPTokenStaking proxy deployed at:", Predeploys.Staking); + console2.log("IPTokenStaking ProxyAdmin deployed at:", EIP1967Helper.getAdmin(Predeploys.Staking)); + console2.log("IPTokenStaking impl at:", EIP1967Helper.getImplementation(Predeploys.Staking)); + console2.log("IPTokenStaking owner:", IPTokenStaking(Predeploys.Staking).owner()); + } + + /** + * @notice Setup Slashing predeploy + */ + function setSlashing() internal { + address impl = Predeploys.getImplAddress(Predeploys.Slashing); + address tmp = address(new IPTokenSlashing(Predeploys.Staking)); + + console2.log("tpm", tmp); + vm.etch(impl, tmp.code); + + // reset tmp + vm.etch(tmp, ""); + vm.store(tmp, 0, "0x"); + vm.resetNonce(tmp); + + InitializableHelper.disableInitializers(impl); + IPTokenSlashing(Predeploys.Slashing).initialize(protocolAdmin, 1 ether); + + console2.log("IPTokenSlashing proxy deployed at:", Predeploys.Slashing); + console2.log("IPTokenSlashing ProxyAdmin deployed at:", EIP1967Helper.getAdmin(Predeploys.Slashing)); + console2.log("IPTokenSlashing impl at:", EIP1967Helper.getImplementation(Predeploys.Slashing)); + } + + /** + * @notice Setup Upgrade predeploy + */ + function setUpgrade() internal { + address impl = Predeploys.getImplAddress(Predeploys.Upgrades); + address tmp = address(new UpgradeEntrypoint()); + + console2.log("tpm", tmp); + vm.etch(impl, tmp.code); + + // reset tmp + vm.etch(tmp, ""); + vm.store(tmp, 0, "0x"); + vm.resetNonce(tmp); + + InitializableHelper.disableInitializers(impl); + UpgradeEntrypoint(Predeploys.Upgrades).initialize(protocolAdmin); + + console2.log("UpgradeEntrypoint proxy deployed at:", Predeploys.Upgrades); + console2.log("UpgradeEntrypoint ProxyAdmin deployed at:", EIP1967Helper.getAdmin(Predeploys.Upgrades)); + console2.log("UpgradeEntrypoint impl at:", EIP1967Helper.getImplementation(Predeploys.Upgrades)); + } + + function setAllocations() internal { + // EL Predeploys + vm.deal(0x0000000000000000000000000000000000000001, 1); + vm.deal(0x0000000000000000000000000000000000000001, 1); + vm.deal(0x0000000000000000000000000000000000000002, 1); + vm.deal(0x0000000000000000000000000000000000000003, 1); + vm.deal(0x0000000000000000000000000000000000000004, 1); + vm.deal(0x0000000000000000000000000000000000000005, 1); + vm.deal(0x0000000000000000000000000000000000000006, 1); + 
vm.deal(0x0000000000000000000000000000000000000007, 1); + vm.deal(0x0000000000000000000000000000000000000008, 1); + vm.deal(0x0000000000000000000000000000000000000009, 1); + vm.deal(0x000000000000000000000000000000000000001a, 1); + // Allocation + if (block.chainid == MAINNET_CHAIN_ID) { + // TBD + } else { + // Testnet alloc + vm.deal(0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266, 100000000 ether); + vm.deal(0xf398C12A45Bc409b6C652E25bb0a3e702492A4ab, 100000000 ether); + vm.deal(0xEcB1D051475A7e330b1DD6683cdC7823Bbcf8Dcf, 100000000 ether); + vm.deal(0x5518D1BD054782792D2783509FbE30fa9D888888, 100000000 ether); + vm.deal(0xbd39FAe873F301b53e14d365383118cD4a222222, 100000000 ether); + vm.deal(0x00FCeC044cD73e8eC6Ad771556859b00C9011111, 100000000 ether); + vm.deal(0xb5350B7CaE94C2bF6B2b56Ef6A06cC1153900000, 100000000 ether); + } + } +} diff --git a/contracts/script/TestPrecompileUpgrades.s.sol b/contracts/script/TestPrecompileUpgrades.s.sol new file mode 100644 index 00000000..76228602 --- /dev/null +++ b/contracts/script/TestPrecompileUpgrades.s.sol @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.23; +/* solhint-disable no-console */ +/* solhint-disable max-line-length */ + +import { Script } from "forge-std/Script.sol"; +import { console2 } from "forge-std/console2.sol"; +import { ProxyAdmin } from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; + +import { IPTokenStaking } from "../src/protocol/IPTokenStaking.sol"; +import { IPTokenSlashing } from "../src/protocol/IPTokenSlashing.sol"; +import { UpgradeEntrypoint } from "../src/protocol/UpgradeEntrypoint.sol"; +import { ITransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; + +import { EIP1967Helper } from "./utils/EIP1967Helper.sol"; +import { Predeploys } from "../src/libraries/Predeploys.sol"; + +abstract contract MockNewFeatures { + function foo() external pure returns(string memory) { + return "bar"; + } +} + +contract IPTokenStakingV2 is IPTokenStaking, MockNewFeatures { + constructor( + uint256 stakingRounding, + uint32 defaultCommissionRate, + uint32 defaultMaxCommissionRate, + uint32 defaultMaxCommissionChangeRate + ) IPTokenStaking(stakingRounding, defaultCommissionRate, defaultMaxCommissionRate, defaultMaxCommissionChangeRate) { + + } +} + +contract IPTokenSlashingV2 is IPTokenSlashing, MockNewFeatures { + constructor(address ipTokenStaking) IPTokenSlashing(ipTokenStaking) {} +} + +contract UpgradeEntrypointV2 is UpgradeEntrypoint, MockNewFeatures { +} + +/** + * @title TestPrecompileUpgrades + * @dev A script to test upgrading the precompile contracts + */ +contract TestPrecompileUpgrades is Script { + // To run the script: + // - Dry run + // forge script script/DeployIPTokenSlashing.s.sol --fork-url + // + // - Deploy (OK for devnet) + // forge script script/DeployIPTokenSlashing.s.sol --fork-url --broadcast + // + // - Deploy and Verify (for testnet) + function run() public { + // Read env for admin address + uint256 upgradeKey = vm.envUint("UPGRADE_ADMIN_KEY"); + address upgrader = vm.addr(upgradeKey); + console2.log("upgrader", upgrader); + vm.startBroadcast(upgradeKey); + + // ---- Staking + address newImpl = address(new IPTokenStakingV2( + 1 gwei, // stakingRounding + 1000, // defaultCommissionRate, 10% + 5000, // defaultMaxCommissionRate, 50% + 500 // defaultMaxCommissionChangeRate, 5% + )); + ProxyAdmin proxyAdmin = ProxyAdmin( + EIP1967Helper.getAdmin(Predeploys.Staking) + ); + console2.log("staking proxy admin", 
address(proxyAdmin)); + console2.log("staking proxy admin owner", proxyAdmin.owner()); + proxyAdmin.upgradeAndCall( + ITransparentUpgradeableProxy(Predeploys.Staking), + newImpl, + "" + ); + if (EIP1967Helper.getImplementation(Predeploys.Staking) != newImpl) { + revert("Staking not upgraded"); + } + if (keccak256(abi.encode(IPTokenStakingV2(Predeploys.Staking).foo())) != keccak256(abi.encode("bar"))) { + revert("Upgraded to wrong iface"); + } + + // ---- Slashing + newImpl = address(new IPTokenSlashingV2( + Predeploys.Staking + )); + proxyAdmin = ProxyAdmin( + EIP1967Helper.getAdmin(Predeploys.Slashing) + ); + console2.log("slashing proxy admin", address(proxyAdmin)); + console2.log("slashing proxy admin owner", proxyAdmin.owner()); + proxyAdmin.upgradeAndCall( + ITransparentUpgradeableProxy(Predeploys.Slashing), + newImpl, + "" + ); + if (EIP1967Helper.getImplementation(Predeploys.Slashing) != newImpl) { + revert("Slashing not upgraded"); + } + if (keccak256(abi.encode(IPTokenSlashingV2(Predeploys.Slashing).foo())) != keccak256(abi.encode("bar"))) { + revert("Upgraded to wrong iface"); + } + + // ---- Upgrades + newImpl = address(new UpgradeEntrypointV2()); + proxyAdmin = ProxyAdmin( + EIP1967Helper.getAdmin(Predeploys.Upgrades) + ); + console2.log("upgrades proxy admin", address(proxyAdmin)); + console2.log("upgrades proxy admin owner", proxyAdmin.owner()); + + proxyAdmin.upgradeAndCall( + ITransparentUpgradeableProxy(Predeploys.Upgrades), + newImpl, + "" + ); + if (keccak256(abi.encode(UpgradeEntrypointV2(Predeploys.Upgrades).foo())) != keccak256(abi.encode("bar"))) { + revert("Upgraded to wrong iface"); + } + if (EIP1967Helper.getImplementation(Predeploys.Upgrades) != newImpl) { + revert("UpgradeEntrypoint not upgraded"); + } + vm.stopBroadcast(); + } +} diff --git a/contracts/src/libraries/Predeploys.sol b/contracts/src/libraries/Predeploys.sol index e5be8626..114d1b20 100644 --- a/contracts/src/libraries/Predeploys.sol +++ b/contracts/src/libraries/Predeploys.sol @@ -13,7 +13,7 @@ library Predeploys { address internal constant WIP = 0x1513000000000000000000000000000000000000; address internal constant Staking = 0xCCcCcC0000000000000000000000000000000001; address internal constant Slashing = 0xCccCCC0000000000000000000000000000000002; - address internal constant Upgrade = 0xccCCcc0000000000000000000000000000000003; + address internal constant Upgrades = 0xccCCcc0000000000000000000000000000000003; /// @notice Return true if `addr` is not proxied function notProxied(address addr) internal pure returns (bool) { @@ -21,7 +21,7 @@ library Predeploys { } /// @notice Return implementation address for a proxied predeploy - function impl(address addr) internal pure returns (address) { + function getImplAddress(address addr) internal pure returns (address) { require(isPredeploy(addr), "Predeploys: not a predeploy"); require(!notProxied(addr), "Predeploys: not proxied"); @@ -31,7 +31,7 @@ library Predeploys { /// @notice Return true if `addr` is an active predeploy function isActivePredeploy(address addr) internal pure returns (bool) { - return addr == WIP || addr == Staking || addr == Slashing || addr == Upgrade; + return addr == WIP || addr == Staking || addr == Slashing || addr == Upgrades; } /// @notice Return true if `addr` is in some predeploy namespace diff --git a/contracts/src/protocol/IPTokenSlashing.sol b/contracts/src/protocol/IPTokenSlashing.sol index c2fa3f2f..f99f113b 100644 --- a/contracts/src/protocol/IPTokenSlashing.sol +++ b/contracts/src/protocol/IPTokenSlashing.sol @@ -2,7 
+2,6 @@ pragma solidity ^0.8.23; import { Ownable2StepUpgradeable } from "@openzeppelin/contracts-upgradeable/access/Ownable2StepUpgradeable.sol"; -import { UUPSUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import { IIPTokenSlashing } from "../interfaces/IIPTokenSlashing.sol"; import { IPTokenStaking } from "./IPTokenStaking.sol"; @@ -13,7 +12,7 @@ import { Secp256k1 } from "../libraries/Secp256k1.sol"; * @notice The EVM interface to the consensus chain's x/slashing module. Calls are proxied to the consensus chain, but * not executed synchronously; execution is left to the consensus chain, which may fail. */ -contract IPTokenSlashing is IIPTokenSlashing, Ownable2StepUpgradeable, UUPSUpgradeable { +contract IPTokenSlashing is IIPTokenSlashing, Ownable2StepUpgradeable { /// @notice IPTokenStaking contract address. IPTokenStaking public immutable IP_TOKEN_STAKING; @@ -28,7 +27,6 @@ contract IPTokenSlashing is IIPTokenSlashing, Ownable2StepUpgradeable, UUPSUpgra /// @notice Initializes the contract. function initialize(address accessManager, uint256 newUnjailFee) public initializer { - __UUPSUpgradeable_init(); __Ownable_init(accessManager); require(newUnjailFee > 0, "IPTokenSlashing: Invalid unjail fee"); unjailFee = newUnjailFee; @@ -96,8 +94,4 @@ contract IPTokenSlashing is IIPTokenSlashing, Ownable2StepUpgradeable, UUPSUpgra (bool validatorExists, , , , , ) = IP_TOKEN_STAKING.validatorMetadata(validatorCmpPubkey); require(validatorExists, "IPTokenSlashing: Validator does not exist"); } - - /// @dev Hook to authorize the upgrade according to UUPSUpgradeable - /// @param newImplementation The address of the new implementation - function _authorizeUpgrade(address newImplementation) internal override onlyOwner {} } diff --git a/contracts/src/protocol/IPTokenStaking.sol b/contracts/src/protocol/IPTokenStaking.sol index 59457482..c64cf028 100644 --- a/contracts/src/protocol/IPTokenStaking.sol +++ b/contracts/src/protocol/IPTokenStaking.sol @@ -3,7 +3,6 @@ pragma solidity ^0.8.23; import { Ownable2StepUpgradeable } from "@openzeppelin/contracts-upgradeable/access/Ownable2StepUpgradeable.sol"; import { ReentrancyGuardUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/ReentrancyGuardUpgradeable.sol"; -import { UUPSUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; import { IIPTokenStaking } from "../interfaces/IIPTokenStaking.sol"; @@ -13,7 +12,7 @@ import { Secp256k1 } from "../libraries/Secp256k1.sol"; * @title IPTokenStaking * @notice The deposit contract for IP token staked validators. */ -contract IPTokenStaking is IIPTokenStaking, Ownable2StepUpgradeable, ReentrancyGuardUpgradeable, UUPSUpgradeable { +contract IPTokenStaking is IIPTokenStaking, Ownable2StepUpgradeable, ReentrancyGuardUpgradeable { using EnumerableSet for EnumerableSet.AddressSet; /// @notice Default commission rate for a validator. Out of 100%, or 10_000. 
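Dropping UUPSUpgradeable from IPTokenSlashing, IPTokenStaking and UpgradeEntrypoint moves upgrade authorization out of the implementations entirely: with a TransparentUpgradeableProxy, only the ProxyAdmin created in the proxy constructor may upgrade, so the __UUPSUpgradeable_init calls and _authorizeUpgrade hooks removed in these hunks become dead code. A minimal sketch of how an upgrade is performed after this change, following the flow in TestPrecompileUpgrades.s.sol above (it assumes the caller owns the ProxyAdmin and that newImpl is an already-deployed implementation):

    ProxyAdmin admin = ProxyAdmin(EIP1967Helper.getAdmin(Predeploys.Staking));
    admin.upgradeAndCall(ITransparentUpgradeableProxy(Predeploys.Staking), newImpl, "");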
@@ -92,7 +91,6 @@ contract IPTokenStaking is IIPTokenStaking, Ownable2StepUpgradeable, ReentrancyG uint256 _withdrawalAddressChangeInterval ) public initializer { __ReentrancyGuard_init(); - __UUPSUpgradeable_init(); __Ownable_init(accessManager); _setMinStakeAmount(_minStakeAmount); _setMinUnstakeAmount(_minUnstakeAmount); @@ -549,8 +547,4 @@ contract IPTokenStaking is IIPTokenStaking, Ownable2StepUpgradeable, ReentrancyG (bool success, ) = msg.sender.call{ value: remainder }(""); require(success, "IPTokenStaking: Failed to refund remainder"); } - - /// @dev Hook to authorize the upgrade according to UUPSUpgradeable - /// @param newImplementation The address of the new implementation - function _authorizeUpgrade(address newImplementation) internal override onlyOwner {} } diff --git a/contracts/src/protocol/UpgradeEntrypoint.sol b/contracts/src/protocol/UpgradeEntrypoint.sol index f7f387dd..69ce097b 100644 --- a/contracts/src/protocol/UpgradeEntrypoint.sol +++ b/contracts/src/protocol/UpgradeEntrypoint.sol @@ -2,7 +2,6 @@ pragma solidity ^0.8.23; import { Ownable2StepUpgradeable } from "@openzeppelin/contracts-upgradeable/access/Ownable2StepUpgradeable.sol"; -import { UUPSUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import { IUpgradeEntrypoint } from "../interfaces/IUpgradeEntrypoint.sol"; @@ -10,7 +9,7 @@ import { IUpgradeEntrypoint } from "../interfaces/IUpgradeEntrypoint.sol"; * @title UpgradeEntrypoint * @notice Entrypoint contract for submitting x/upgrade module actions. */ -contract UpgradeEntrypoint is IUpgradeEntrypoint, Ownable2StepUpgradeable, UUPSUpgradeable { +contract UpgradeEntrypoint is IUpgradeEntrypoint, Ownable2StepUpgradeable { constructor() { _disableInitializers(); } @@ -18,7 +17,6 @@ contract UpgradeEntrypoint is IUpgradeEntrypoint, Ownable2StepUpgradeable, UUPSU /// @notice Initializes the contract. 
function initialize(address accessManager) public initializer { require(accessManager != address(0), "UpgradeEntrypoint: accessManager cannot be zero address"); - __UUPSUpgradeable_init(); __Ownable_init(accessManager); } @@ -34,8 +32,4 @@ contract UpgradeEntrypoint is IUpgradeEntrypoint, Ownable2StepUpgradeable, UUPSU function planUpgrade(string calldata name, int64 height, string calldata info) external onlyOwner { emit SoftwareUpgrade({ name: name, height: height, info: info }); } - - /// @dev Hook to authorize the upgrade according to UUPSUpgradeable - /// @param newImplementation The address of the new implementation - function _authorizeUpgrade(address newImplementation) internal override onlyOwner {} } diff --git a/contracts/test/script/DeployCore.t.sol b/contracts/test/script/DeployCore.t.sol index 1ce3f7be..285fa7c3 100644 --- a/contracts/test/script/DeployCore.t.sol +++ b/contracts/test/script/DeployCore.t.sol @@ -1,30 +1,30 @@ -// SPDX-License-Identifier: BUSL-1.1 -pragma solidity ^0.8.23; -/* solhint-disable no-console */ -/* solhint-disable max-line-length */ -/// NOTE: pragma allowlist-secret must be inline (same line as the pubkey hex string) to avoid false positive -/// flag "Hex High Entropy String" in CI run detect-secrets +// // SPDX-License-Identifier: BUSL-1.1 +// pragma solidity ^0.8.23; +// /* solhint-disable no-console */ +// /* solhint-disable max-line-length */ +// /// NOTE: pragma allowlist-secret must be inline (same line as the pubkey hex string) to avoid false positive +// /// flag "Hex High Entropy String" in CI run detect-secrets -import { Test } from "forge-std/Test.sol"; +// import { Test } from "forge-std/Test.sol"; -import { DeployCore } from "../../script/DeployCore.s.sol"; +// import { DeployCore } from "../../script/DeployCore.s.sol"; -contract DeployCoreTest is Test { - DeployCore private deployCore; +// contract DeployCoreTest is Test { +// DeployCore private deployCore; - function setUp() public { - deployCore = new DeployCore(); - } +// function setUp() public { +// deployCore = new DeployCore(); +// } - function testDeployDeployCore_run() public { - // Network shall not deploy the IPTokenStaking contract if IPTOKENSTAKING_DEPLOYER_KEY not set. - vm.chainId(1513); - // solhint-disable - vm.expectRevert('vm.envUint: environment variable "IPTOKENSTAKING_DEPLOYER_KEY" not found'); - deployCore.run(); +// function testDeployDeployCore_run() public { +// // Network shall not deploy the IPTokenStaking contract if IPTOKENSTAKING_DEPLOYER_KEY not set. +// vm.chainId(1513); +// // solhint-disable +// vm.expectRevert('vm.envUint: environment variable "IPTOKENSTAKING_DEPLOYER_KEY" not found'); +// deployCore.run(); - // Network shall deploy the IPTokenStaking contract. - vm.setEnv("IPTOKENSTAKING_DEPLOYER_KEY", "0x123456789abcdef"); - deployCore.run(); - } -} +// // Network shall deploy the IPTokenStaking contract. 
+// vm.setEnv("IPTOKENSTAKING_DEPLOYER_KEY", "0x123456789abcdef"); +// deployCore.run(); +// } +// } diff --git a/contracts/test/stake/IPTokenSlashing.t.sol b/contracts/test/stake/IPTokenSlashing.t.sol index c36e2c43..cf4bedf8 100644 --- a/contracts/test/stake/IPTokenSlashing.t.sol +++ b/contracts/test/stake/IPTokenSlashing.t.sol @@ -24,11 +24,6 @@ contract IPTokenSlashingTest is Test { emit Received(msg.sender, msg.value); } - function setUp() public override { - setStaking(); - setSlashing(); - } - function testIPTokenSlashing_Parameters() public view { assertEq(ipTokenSlashing.unjailFee(), 1 ether); } diff --git a/contracts/test/stake/IPTokenStaking.t.sol b/contracts/test/stake/IPTokenStaking.t.sol index 3a3df3ae..f1e84f3e 100644 --- a/contracts/test/stake/IPTokenStaking.t.sol +++ b/contracts/test/stake/IPTokenStaking.t.sol @@ -27,8 +27,8 @@ contract IPTokenStakingTest is Test { emit Received(msg.sender, msg.value); } - function setUp() public override { - setStaking(); + function setUp() public virtual override { + super.setUp(); vm.assertEq(delegatorCmpPubkey, Secp256k1.compressPublicKey(delegatorUncmpPubkey)); } diff --git a/contracts/test/upgrade/UpgradeEntryPoint.t.sol b/contracts/test/upgrade/UpgradeEntryPoint.t.sol index 35ddc803..1cd2a7f6 100644 --- a/contracts/test/upgrade/UpgradeEntryPoint.t.sol +++ b/contracts/test/upgrade/UpgradeEntryPoint.t.sol @@ -6,16 +6,11 @@ pragma solidity ^0.8.23; /// flag "Hex High Entropy String" in CI run detect-secrets import { UpgradeEntrypoint, IUpgradeEntrypoint } from "../../src/protocol/UpgradeEntrypoint.sol"; -import { ERC1967Proxy } from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { Test } from "../utils/Test.sol"; contract UpgradeEntrypointTest is Test { - function setUp() public override { - address impl = address(new UpgradeEntrypoint()); - bytes memory initializer = abi.encodeCall(UpgradeEntrypoint.initialize, (admin)); - upgradeEntrypoint = UpgradeEntrypoint(address(new ERC1967Proxy(impl, initializer))); - } function testUpgradeEntrypoint_planUpgrade() public { // Network shall allow the protocol owner to submit an upgrade plan. 
diff --git a/contracts/test/utils/Test.sol b/contracts/test/utils/Test.sol index b5509d88..28446b72 100644 --- a/contracts/test/utils/Test.sol +++ b/contracts/test/utils/Test.sol @@ -5,56 +5,30 @@ pragma solidity ^0.8.23; import { console2 } from "forge-std/console2.sol"; import { Test as ForgeTest } from "forge-std/Test.sol"; -import { ERC1967Proxy } from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { IPTokenStaking } from "../../src/protocol/IPTokenStaking.sol"; import { IPTokenSlashing } from "../../src/protocol/IPTokenSlashing.sol"; import { UpgradeEntrypoint } from "../../src/protocol/UpgradeEntrypoint.sol"; +import { Predeploys } from "../../src/libraries/Predeploys.sol"; + +import { GenerateAlloc } from "../../script/GenerateAlloc.s.sol"; contract Test is ForgeTest { address internal admin = address(0x123); + address internal upgradeAdmin = address(0x456); IPTokenStaking internal ipTokenStaking; IPTokenSlashing internal ipTokenSlashing; UpgradeEntrypoint internal upgradeEntrypoint; - function setUp() public virtual { - setStaking(); - setSlashing(); - // setUpgrade(); - } - - function setStaking() internal { - address impl = address( - new IPTokenStaking( - 1 gwei, // stakingRounding - 1000, // defaultCommissionRate, 10% - 5000, // defaultMaxCommissionRate, 50% - 500 // defaultMaxCommissionChangeRate, 5% - ) - ); - bytes memory initializer = abi.encodeCall( - IPTokenStaking.initialize, - (admin, 1 ether, 1 ether, 1 ether, 7 days) - ); - ipTokenStaking = IPTokenStaking(address(new ERC1967Proxy(impl, initializer))); - } - - function setSlashing() internal { - require(address(ipTokenStaking) != address(0), "ipTokenStaking not set"); - - address impl = address(new IPTokenSlashing(address(ipTokenStaking))); - - bytes memory initializer = abi.encodeCall(IPTokenSlashing.initialize, (admin, 1 ether)); - ipTokenSlashing = IPTokenSlashing(address(new ERC1967Proxy(impl, initializer))); - - console2.log("unjailFee:", ipTokenSlashing.unjailFee()); - } - - function setUpgrade() internal { - address impl = address(new UpgradeEntrypoint()); - - bytes memory initializer = abi.encodeWithSignature("initialize(address)", admin); - upgradeEntrypoint = UpgradeEntrypoint(address(new ERC1967Proxy(impl, initializer))); + function setUp() virtual public { + GenerateAlloc initializer = new GenerateAlloc(); + initializer.disableStateDump(); // Faster tests. Don't call to verify JSON output + initializer.setAdminAddresses(upgradeAdmin, admin); + initializer.run(); + ipTokenStaking = IPTokenStaking(Predeploys.Staking); + ipTokenSlashing = IPTokenSlashing(Predeploys.Slashing); + upgradeEntrypoint = UpgradeEntrypoint(Predeploys.Upgrades); } } From 5e97596ed5b80ee958d790657d623c39e46d53a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:21:10 -0700 Subject: [PATCH 27/29] build(deps): bump github.com/spf13/cobra from 1.8.0 to 1.8.1 (#19) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jongwon Park --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 6cc516df..bbf04791 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/leodido/go-conventionalcommits v0.12.0 github.com/muesli/termenv v0.15.2 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index b13d3bb4..71cbeedd 100644 --- a/go.sum +++ b/go.sum @@ -402,7 +402,6 @@ github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5n github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= @@ -1143,8 +1142,8 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= From 32308f1e8ea60796bfb169ba8a3dcf71acba754d Mon Sep 17 00:00:00 2001 From: Zerui Ge Date: Wed, 9 Oct 2024 09:29:34 -0700 Subject: [PATCH 28/29] feat(api): add epochs api (#176) * feat(api): add epochs api * feat(api): add epoch info api --- client/app/app.go | 5 + client/server/epochs.go | 48 +++ client/server/server.go | 3 + client/x/epochs/keeper/grpc_query.go | 16 +- client/x/epochs/keeper/grpc_query_test.go | 2 +- client/x/epochs/types/query.pb.go | 339 ++++++++++++---------- client/x/epochs/types/query.proto | 20 +- 7 files changed, 253 insertions(+), 180 deletions(-) create mode 100644 client/server/epochs.go diff --git a/client/app/app.go b/client/app/app.go index 6927b903..e7131079 100644 --- a/client/app/app.go +++ b/client/app/app.go @@ -23,6 +23,7 @@ import ( "github.com/piplabs/story/client/app/keepers" "github.com/piplabs/story/client/comet" + epochskeeper "github.com/piplabs/story/client/x/epochs/keeper" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" mintkeeper "github.com/piplabs/story/client/x/mint/keeper" 
"github.com/piplabs/story/lib/errors" @@ -212,3 +213,7 @@ func (a App) GetUpgradeKeeper() *upgradekeeper.Keeper { func (a App) GetMintKeeper() mintkeeper.Keeper { return a.Keepers.MintKeeper } + +func (a App) GetEpochsKeeper() *epochskeeper.Keeper { + return a.Keepers.EpochsKeeper +} diff --git a/client/server/epochs.go b/client/server/epochs.go new file mode 100644 index 00000000..55f9da1d --- /dev/null +++ b/client/server/epochs.go @@ -0,0 +1,48 @@ +package server + +import ( + "net/http" + + "github.com/gorilla/mux" + + "github.com/piplabs/story/client/server/utils" + "github.com/piplabs/story/client/x/epochs/keeper" + epochstypes "github.com/piplabs/story/client/x/epochs/types" +) + +func (s *Server) initEpochsRoute() { + s.httpMux.HandleFunc("/epochs/epoch_infos", utils.SimpleWrap(s.aminoCodec, s.GetEpochInfos)) + s.httpMux.HandleFunc("/epochs/epoch_infos/{identifier}", utils.SimpleWrap(s.aminoCodec, s.GetEpochInfo)) +} + +// GetEpochInfos queries running epochInfos. +func (s *Server) GetEpochInfos(r *http.Request) (resp any, err error) { + queryContext, err := s.createQueryContextByHeader(r) + if err != nil { + return nil, err + } + + queryResp, err := keeper.NewQuerier(*s.store.GetEpochsKeeper()).GetEpochInfos(queryContext, &epochstypes.GetEpochInfosRequest{}) + if err != nil { + return nil, err + } + + return queryResp, nil +} + +// GetEpochInfo queries epoch info of specified identifier. +func (s *Server) GetEpochInfo(r *http.Request) (resp any, err error) { + queryContext, err := s.createQueryContextByHeader(r) + if err != nil { + return nil, err + } + + queryResp, err := keeper.NewQuerier(*s.store.GetEpochsKeeper()).GetEpochInfo(queryContext, &epochstypes.GetEpochInfoRequest{ + Identifier: mux.Vars(r)["identifier"], + }) + if err != nil { + return nil, err + } + + return queryResp, nil +} diff --git a/client/server/server.go b/client/server/server.go index d866c549..6b33fcff 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -27,6 +27,7 @@ import ( "github.com/gorilla/handlers" "github.com/gorilla/mux" + epochskeeper "github.com/piplabs/story/client/x/epochs/keeper" evmstakingkeeper "github.com/piplabs/story/client/x/evmstaking/keeper" mintkeeper "github.com/piplabs/story/client/x/mint/keeper" ) @@ -41,6 +42,7 @@ type Store interface { GetDistrKeeper() distrkeeper.Keeper GetUpgradeKeeper() *upgradekeeper.Keeper GetMintKeeper() mintkeeper.Keeper + GetEpochsKeeper() *epochskeeper.Keeper } type Server struct { @@ -124,6 +126,7 @@ func (s *Server) registerHandle() { s.initStakingRoute() s.initUpgradeRoute() s.initMintRoute() + s.initEpochsRoute() } func (s *Server) createQueryContextByHeader(r *http.Request) (sdk.Context, error) { diff --git a/client/x/epochs/keeper/grpc_query.go b/client/x/epochs/keeper/grpc_query.go index cd26d019..7a7d5f60 100644 --- a/client/x/epochs/keeper/grpc_query.go +++ b/client/x/epochs/keeper/grpc_query.go @@ -23,16 +23,16 @@ func NewQuerier(k Keeper) Querier { return Querier{Keeper: k} } -// EpochInfos provide running epochInfos. -func (q Querier) EpochInfos(ctx context.Context, _ *types.QueryEpochsInfoRequest) (*types.QueryEpochsInfoResponse, error) { +// GetEpochInfos provide running epochInfos. +func (q Querier) GetEpochInfos(ctx context.Context, _ *types.GetEpochInfosRequest) (*types.GetEpochInfosResponse, error) { epochs, err := q.Keeper.AllEpochInfos(ctx) - return &types.QueryEpochsInfoResponse{ + return &types.GetEpochInfosResponse{ Epochs: epochs, }, err } -// CurrentEpoch provides current epoch of specified identifier. 
-func (q Querier) CurrentEpoch(ctx context.Context, req *types.QueryCurrentEpochRequest) (*types.QueryCurrentEpochResponse, error) { +// GetEpochInfo provide epoch info of specified identifier. +func (q Querier) GetEpochInfo(ctx context.Context, req *types.GetEpochInfoRequest) (*types.GetEpochInfoResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -40,12 +40,12 @@ func (q Querier) CurrentEpoch(ctx context.Context, req *types.QueryCurrentEpochR return nil, status.Error(codes.InvalidArgument, "identifier is empty") } - info, err := q.Keeper.EpochInfo.Get(ctx, req.Identifier) + info, err := q.Keeper.GetEpochInfo(ctx, req.Identifier) if err != nil { return nil, errors.New("not available identifier") } - return &types.QueryCurrentEpochResponse{ - CurrentEpoch: info.CurrentEpoch, + return &types.GetEpochInfoResponse{ + Epoch: info, }, nil } diff --git a/client/x/epochs/keeper/grpc_query_test.go b/client/x/epochs/keeper/grpc_query_test.go index 30fc6dba..cdc0e4cb 100644 --- a/client/x/epochs/keeper/grpc_query_test.go +++ b/client/x/epochs/keeper/grpc_query_test.go @@ -9,7 +9,7 @@ func (s *KeeperTestSuite) TestQueryEpochInfos() { queryClient := s.queryClient // Check that querying epoch infos on default genesis returns the default genesis epoch infos - epochInfosResponse, err := queryClient.EpochInfos(s.Ctx, &types.QueryEpochsInfoRequest{}) + epochInfosResponse, err := queryClient.GetEpochInfos(s.Ctx, &types.GetEpochInfosRequest{}) s.Require().NoError(err) s.Require().Len(epochInfosResponse.Epochs, 4) expectedEpochs := types.DefaultGenesis().Epochs diff --git a/client/x/epochs/types/query.pb.go b/client/x/epochs/types/query.pb.go index 29e534b2..8921ff5b 100644 --- a/client/x/epochs/types/query.pb.go +++ b/client/x/epochs/types/query.pb.go @@ -29,21 +29,21 @@ var _ = math.Inf // proto package needs to be updated. 
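The routes registered in client/server/epochs.go above expose the same queries over plain HTTP. For example, assuming a local node whose API server listens on localhost:1317 (the address depends on the node's server configuration) and an epoch identifier present in genesis, such as "day":

    curl http://localhost:1317/epochs/epoch_infos
    curl http://localhost:1317/epochs/epoch_infos/day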
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type QueryEpochsInfoRequest struct { +type GetEpochInfosRequest struct { } -func (m *QueryEpochsInfoRequest) Reset() { *m = QueryEpochsInfoRequest{} } -func (m *QueryEpochsInfoRequest) String() string { return proto.CompactTextString(m) } -func (*QueryEpochsInfoRequest) ProtoMessage() {} -func (*QueryEpochsInfoRequest) Descriptor() ([]byte, []int) { +func (m *GetEpochInfosRequest) Reset() { *m = GetEpochInfosRequest{} } +func (m *GetEpochInfosRequest) String() string { return proto.CompactTextString(m) } +func (*GetEpochInfosRequest) ProtoMessage() {} +func (*GetEpochInfosRequest) Descriptor() ([]byte, []int) { return fileDescriptor_78cd7c4fa831b33b, []int{0} } -func (m *QueryEpochsInfoRequest) XXX_Unmarshal(b []byte) error { +func (m *GetEpochInfosRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryEpochsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetEpochInfosRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryEpochsInfoRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetEpochInfosRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -53,34 +53,34 @@ func (m *QueryEpochsInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } -func (m *QueryEpochsInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryEpochsInfoRequest.Merge(m, src) +func (m *GetEpochInfosRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEpochInfosRequest.Merge(m, src) } -func (m *QueryEpochsInfoRequest) XXX_Size() int { +func (m *GetEpochInfosRequest) XXX_Size() int { return m.Size() } -func (m *QueryEpochsInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryEpochsInfoRequest.DiscardUnknown(m) +func (m *GetEpochInfosRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetEpochInfosRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryEpochsInfoRequest proto.InternalMessageInfo +var xxx_messageInfo_GetEpochInfosRequest proto.InternalMessageInfo -type QueryEpochsInfoResponse struct { +type GetEpochInfosResponse struct { Epochs []EpochInfo `protobuf:"bytes,1,rep,name=epochs,proto3" json:"epochs"` } -func (m *QueryEpochsInfoResponse) Reset() { *m = QueryEpochsInfoResponse{} } -func (m *QueryEpochsInfoResponse) String() string { return proto.CompactTextString(m) } -func (*QueryEpochsInfoResponse) ProtoMessage() {} -func (*QueryEpochsInfoResponse) Descriptor() ([]byte, []int) { +func (m *GetEpochInfosResponse) Reset() { *m = GetEpochInfosResponse{} } +func (m *GetEpochInfosResponse) String() string { return proto.CompactTextString(m) } +func (*GetEpochInfosResponse) ProtoMessage() {} +func (*GetEpochInfosResponse) Descriptor() ([]byte, []int) { return fileDescriptor_78cd7c4fa831b33b, []int{1} } -func (m *QueryEpochsInfoResponse) XXX_Unmarshal(b []byte) error { +func (m *GetEpochInfosResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryEpochsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetEpochInfosResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryEpochsInfoResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetEpochInfosResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -90,41 +90,41 @@ func (m 
*QueryEpochsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *QueryEpochsInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryEpochsInfoResponse.Merge(m, src) +func (m *GetEpochInfosResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEpochInfosResponse.Merge(m, src) } -func (m *QueryEpochsInfoResponse) XXX_Size() int { +func (m *GetEpochInfosResponse) XXX_Size() int { return m.Size() } -func (m *QueryEpochsInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryEpochsInfoResponse.DiscardUnknown(m) +func (m *GetEpochInfosResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetEpochInfosResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryEpochsInfoResponse proto.InternalMessageInfo +var xxx_messageInfo_GetEpochInfosResponse proto.InternalMessageInfo -func (m *QueryEpochsInfoResponse) GetEpochs() []EpochInfo { +func (m *GetEpochInfosResponse) GetEpochs() []EpochInfo { if m != nil { return m.Epochs } return nil } -type QueryCurrentEpochRequest struct { +type GetEpochInfoRequest struct { Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` } -func (m *QueryCurrentEpochRequest) Reset() { *m = QueryCurrentEpochRequest{} } -func (m *QueryCurrentEpochRequest) String() string { return proto.CompactTextString(m) } -func (*QueryCurrentEpochRequest) ProtoMessage() {} -func (*QueryCurrentEpochRequest) Descriptor() ([]byte, []int) { +func (m *GetEpochInfoRequest) Reset() { *m = GetEpochInfoRequest{} } +func (m *GetEpochInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetEpochInfoRequest) ProtoMessage() {} +func (*GetEpochInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_78cd7c4fa831b33b, []int{2} } -func (m *QueryCurrentEpochRequest) XXX_Unmarshal(b []byte) error { +func (m *GetEpochInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryCurrentEpochRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetEpochInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryCurrentEpochRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetEpochInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -134,41 +134,41 @@ func (m *QueryCurrentEpochRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *QueryCurrentEpochRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCurrentEpochRequest.Merge(m, src) +func (m *GetEpochInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEpochInfoRequest.Merge(m, src) } -func (m *QueryCurrentEpochRequest) XXX_Size() int { +func (m *GetEpochInfoRequest) XXX_Size() int { return m.Size() } -func (m *QueryCurrentEpochRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCurrentEpochRequest.DiscardUnknown(m) +func (m *GetEpochInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetEpochInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_QueryCurrentEpochRequest proto.InternalMessageInfo +var xxx_messageInfo_GetEpochInfoRequest proto.InternalMessageInfo -func (m *QueryCurrentEpochRequest) GetIdentifier() string { +func (m *GetEpochInfoRequest) GetIdentifier() string { if m != nil { return m.Identifier } return "" } -type QueryCurrentEpochResponse struct { - CurrentEpoch int64 `protobuf:"varint,1,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` +type GetEpochInfoResponse 
struct { + Epoch EpochInfo `protobuf:"bytes,1,opt,name=epoch,proto3" json:"epoch"` } -func (m *QueryCurrentEpochResponse) Reset() { *m = QueryCurrentEpochResponse{} } -func (m *QueryCurrentEpochResponse) String() string { return proto.CompactTextString(m) } -func (*QueryCurrentEpochResponse) ProtoMessage() {} -func (*QueryCurrentEpochResponse) Descriptor() ([]byte, []int) { +func (m *GetEpochInfoResponse) Reset() { *m = GetEpochInfoResponse{} } +func (m *GetEpochInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetEpochInfoResponse) ProtoMessage() {} +func (*GetEpochInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_78cd7c4fa831b33b, []int{3} } -func (m *QueryCurrentEpochResponse) XXX_Unmarshal(b []byte) error { +func (m *GetEpochInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QueryCurrentEpochResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetEpochInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QueryCurrentEpochResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetEpochInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -178,59 +178,58 @@ func (m *QueryCurrentEpochResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *QueryCurrentEpochResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCurrentEpochResponse.Merge(m, src) +func (m *GetEpochInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEpochInfoResponse.Merge(m, src) } -func (m *QueryCurrentEpochResponse) XXX_Size() int { +func (m *GetEpochInfoResponse) XXX_Size() int { return m.Size() } -func (m *QueryCurrentEpochResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCurrentEpochResponse.DiscardUnknown(m) +func (m *GetEpochInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetEpochInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_QueryCurrentEpochResponse proto.InternalMessageInfo +var xxx_messageInfo_GetEpochInfoResponse proto.InternalMessageInfo -func (m *QueryCurrentEpochResponse) GetCurrentEpoch() int64 { +func (m *GetEpochInfoResponse) GetEpoch() EpochInfo { if m != nil { - return m.CurrentEpoch + return m.Epoch } - return 0 + return EpochInfo{} } func init() { - proto.RegisterType((*QueryEpochsInfoRequest)(nil), "client.x.epochs.types.QueryEpochsInfoRequest") - proto.RegisterType((*QueryEpochsInfoResponse)(nil), "client.x.epochs.types.QueryEpochsInfoResponse") - proto.RegisterType((*QueryCurrentEpochRequest)(nil), "client.x.epochs.types.QueryCurrentEpochRequest") - proto.RegisterType((*QueryCurrentEpochResponse)(nil), "client.x.epochs.types.QueryCurrentEpochResponse") + proto.RegisterType((*GetEpochInfosRequest)(nil), "client.x.epochs.types.GetEpochInfosRequest") + proto.RegisterType((*GetEpochInfosResponse)(nil), "client.x.epochs.types.GetEpochInfosResponse") + proto.RegisterType((*GetEpochInfoRequest)(nil), "client.x.epochs.types.GetEpochInfoRequest") + proto.RegisterType((*GetEpochInfoResponse)(nil), "client.x.epochs.types.GetEpochInfoResponse") } func init() { proto.RegisterFile("client/x/epochs/types/query.proto", fileDescriptor_78cd7c4fa831b33b) } var fileDescriptor_78cd7c4fa831b33b = []byte{ - // 367 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x31, 0x4b, 0x3b, 0x31, - 0x18, 0xc6, 0x2f, 0xed, 0xff, 0x5f, 0x30, 0xd6, 0x25, 0x58, 0x3d, 0x0f, 0x49, 0xaf, 
0xd7, 0xc1, - 0x2e, 0x26, 0x5a, 0x37, 0x07, 0x91, 0x8a, 0x83, 0xa3, 0xb7, 0xe9, 0x22, 0xf5, 0x4c, 0xcf, 0x40, - 0x49, 0xae, 0x97, 0x54, 0xda, 0xd5, 0x5d, 0x10, 0xdc, 0xfc, 0x1c, 0x7e, 0x88, 0x8e, 0x05, 0x17, - 0x27, 0x91, 0xd6, 0x0f, 0x22, 0xcd, 0x05, 0xa9, 0xf4, 0x94, 0x6e, 0x21, 0xf9, 0x3d, 0xef, 0xf3, - 0xe4, 0x7d, 0x5f, 0x58, 0x8b, 0xba, 0x9c, 0x09, 0x4d, 0x07, 0x94, 0x25, 0x32, 0xba, 0x55, 0x54, - 0x0f, 0x13, 0xa6, 0x68, 0xaf, 0xcf, 0xd2, 0x21, 0x49, 0x52, 0xa9, 0x25, 0xaa, 0x64, 0x08, 0x19, - 0x90, 0x0c, 0x21, 0x06, 0xf1, 0xd6, 0x63, 0x19, 0x4b, 0x43, 0xd0, 0xd9, 0x29, 0x83, 0xbd, 0xed, - 0x58, 0xca, 0xb8, 0xcb, 0x68, 0x3b, 0xe1, 0xb4, 0x2d, 0x84, 0xd4, 0x6d, 0xcd, 0xa5, 0x50, 0xf6, - 0xb5, 0x9e, 0xef, 0x16, 0x33, 0xc1, 0x14, 0xb7, 0x50, 0xe0, 0xc2, 0x8d, 0xf3, 0x99, 0xfd, 0xa9, - 0x41, 0xce, 0x44, 0x47, 0x86, 0xac, 0xd7, 0x67, 0x4a, 0x07, 0x17, 0x70, 0x73, 0xe1, 0x45, 0x25, - 0x52, 0x28, 0x86, 0x8e, 0x60, 0x29, 0x2b, 0xe9, 0x02, 0xbf, 0xd8, 0x58, 0x6d, 0xfa, 0x24, 0x37, - 0x35, 0x31, 0xd2, 0x99, 0xb2, 0xf5, 0x6f, 0xf4, 0x5e, 0x75, 0x42, 0xab, 0x0a, 0x0e, 0xa1, 0x6b, - 0x4a, 0x9f, 0xf4, 0xd3, 0x94, 0x09, 0x6d, 0x30, 0x6b, 0x8b, 0x30, 0x84, 0xfc, 0x86, 0x09, 0xcd, - 0x3b, 0x9c, 0xa5, 0x2e, 0xf0, 0x41, 0x63, 0x25, 0x9c, 0xbb, 0x09, 0x8e, 0xe1, 0x56, 0x8e, 0xd6, - 0x06, 0xab, 0xc3, 0xb5, 0x28, 0xbb, 0xbf, 0x32, 0x56, 0x46, 0x5f, 0x0c, 0xcb, 0xd1, 0x1c, 0xdc, - 0x7c, 0x29, 0xc0, 0xff, 0xa6, 0x04, 0x7a, 0x00, 0x10, 0x7e, 0x67, 0x54, 0x68, 0xf7, 0x97, 0x6f, - 0xe4, 0x37, 0xc8, 0x23, 0xcb, 0xe2, 0x59, 0xb8, 0xc0, 0xbf, 0x7f, 0xfd, 0x7c, 0x2a, 0x78, 0xc8, - 0xa5, 0x76, 0x30, 0x76, 0x2c, 0x77, 0xfb, 0xf6, 0x84, 0x9e, 0x01, 0x2c, 0xcf, 0xff, 0x0b, 0xd1, - 0xbf, 0x2c, 0x72, 0xba, 0xe7, 0xed, 0x2d, 0x2f, 0xb0, 0xa9, 0x76, 0x4c, 0xaa, 0x1a, 0xaa, 0x2e, - 0xa6, 0xfa, 0xd1, 0xca, 0x16, 0x1d, 0x4d, 0x30, 0x18, 0x4f, 0x30, 0xf8, 0x98, 0x60, 0xf0, 0x38, - 0xc5, 0xce, 0x78, 0x8a, 0x9d, 0xb7, 0x29, 0x76, 0x2e, 0x2b, 0xb9, 0x8b, 0x76, 0x5d, 0x32, 0x1b, - 0x76, 0xf0, 0x15, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x87, 0x4b, 0x27, 0xf6, 0x02, 0x00, 0x00, + // 350 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xce, 0xc9, 0x4c, + 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0x4f, 0x2d, 0xc8, 0x4f, 0xce, 0x28, 0xd6, 0x2f, 0xa9, 0x2c, 0x48, + 0x2d, 0xd6, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x85, + 0x28, 0xd1, 0xab, 0xd0, 0x83, 0x28, 0xd1, 0x03, 0x2b, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, + 0xab, 0xd0, 0x07, 0xb1, 0x20, 0x8a, 0xa5, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, + 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0xa1, 0xb2, + 0xca, 0xd8, 0x6d, 0x4b, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x84, 0x2a, 0x52, 0x12, 0xe3, 0x12, 0x71, + 0x4f, 0x2d, 0x71, 0x05, 0x29, 0xf0, 0xcc, 0x4b, 0xcb, 0x2f, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, + 0x2e, 0x51, 0x0a, 0xe7, 0x12, 0x45, 0x13, 0x2f, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe3, + 0x62, 0x83, 0x18, 0x27, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xa0, 0x87, 0xd5, 0xc5, 0x7a, + 0x70, 0xad, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x41, 0x75, 0x29, 0x99, 0x72, 0x09, 0x23, + 0x1b, 0x0c, 0xb5, 0x4f, 0x48, 0x8e, 0x8b, 0x2b, 0x33, 0x25, 0x35, 0xaf, 0x24, 0x33, 0x2d, 0x33, + 0xb5, 0x48, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0x49, 0x44, 0x29, 0x04, 0xd5, 0x9d, 0x70, + 0xe7, 0xd8, 0x70, 0xb1, 0x82, 0x0d, 0x06, 0x6b, 0x21, 0xde, 0x35, 0x10, 0x4d, 0x46, 0x0b, 0x99, + 0xb8, 0x58, 0x03, 0x41, 0xa1, 0x2f, 0xd4, 0xc3, 0xc8, 0xc5, 0x8b, 
0xe2, 0x61, 0x21, 0x6d, 0x1c, + 0x46, 0x61, 0x0b, 0x2e, 0x29, 0x1d, 0xe2, 0x14, 0x43, 0x1c, 0xad, 0xa4, 0xd0, 0x74, 0xf9, 0xc9, + 0x64, 0x26, 0x29, 0x21, 0x09, 0x7d, 0x68, 0x14, 0x41, 0x23, 0xa8, 0xcc, 0x10, 0xca, 0x12, 0xea, + 0x60, 0xe4, 0xe2, 0x41, 0xd6, 0x2b, 0xa4, 0x45, 0x84, 0x05, 0x30, 0xc7, 0x68, 0x13, 0xa5, 0x16, + 0xea, 0x16, 0x79, 0xb0, 0x5b, 0x24, 0x85, 0xc4, 0x71, 0xb8, 0xc5, 0x49, 0xff, 0xc4, 0x23, 0x39, + 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, + 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x44, 0xb1, 0x26, 0xb0, 0x24, 0x36, 0x70, 0xca, 0x32, + 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0xe5, 0x68, 0xaf, 0xee, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -245,10 +244,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type QueryClient interface { - // EpochInfos provide running epochInfos - EpochInfos(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) - // CurrentEpoch provide current epoch of specified identifier - CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) + // GetEpochInfos provide running epochInfos + GetEpochInfos(ctx context.Context, in *GetEpochInfosRequest, opts ...grpc.CallOption) (*GetEpochInfosResponse, error) + // GetEpochInfo provide epochInfo of specified identifier + GetEpochInfo(ctx context.Context, in *GetEpochInfoRequest, opts ...grpc.CallOption) (*GetEpochInfoResponse, error) } type queryClient struct { @@ -259,18 +258,18 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient { return &queryClient{cc} } -func (c *queryClient) EpochInfos(ctx context.Context, in *QueryEpochsInfoRequest, opts ...grpc.CallOption) (*QueryEpochsInfoResponse, error) { - out := new(QueryEpochsInfoResponse) - err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/EpochInfos", in, out, opts...) +func (c *queryClient) GetEpochInfos(ctx context.Context, in *GetEpochInfosRequest, opts ...grpc.CallOption) (*GetEpochInfosResponse, error) { + out := new(GetEpochInfosResponse) + err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/GetEpochInfos", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochRequest, opts ...grpc.CallOption) (*QueryCurrentEpochResponse, error) { - out := new(QueryCurrentEpochResponse) - err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/CurrentEpoch", in, out, opts...) +func (c *queryClient) GetEpochInfo(ctx context.Context, in *GetEpochInfoRequest, opts ...grpc.CallOption) (*GetEpochInfoResponse, error) { + out := new(GetEpochInfoResponse) + err := c.cc.Invoke(ctx, "/client.x.epochs.types.Query/GetEpochInfo", in, out, opts...) if err != nil { return nil, err } @@ -279,59 +278,59 @@ func (c *queryClient) CurrentEpoch(ctx context.Context, in *QueryCurrentEpochReq // QueryServer is the server API for Query service. 
type QueryServer interface { - // EpochInfos provide running epochInfos - EpochInfos(context.Context, *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) - // CurrentEpoch provide current epoch of specified identifier - CurrentEpoch(context.Context, *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) + // GetEpochInfos provide running epochInfos + GetEpochInfos(context.Context, *GetEpochInfosRequest) (*GetEpochInfosResponse, error) + // GetEpochInfo provide epochInfo of specified identifier + GetEpochInfo(context.Context, *GetEpochInfoRequest) (*GetEpochInfoResponse, error) } // UnimplementedQueryServer can be embedded to have forward compatible implementations. type UnimplementedQueryServer struct { } -func (*UnimplementedQueryServer) EpochInfos(ctx context.Context, req *QueryEpochsInfoRequest) (*QueryEpochsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EpochInfos not implemented") +func (*UnimplementedQueryServer) GetEpochInfos(ctx context.Context, req *GetEpochInfosRequest) (*GetEpochInfosResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEpochInfos not implemented") } -func (*UnimplementedQueryServer) CurrentEpoch(ctx context.Context, req *QueryCurrentEpochRequest) (*QueryCurrentEpochResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CurrentEpoch not implemented") +func (*UnimplementedQueryServer) GetEpochInfo(ctx context.Context, req *GetEpochInfoRequest) (*GetEpochInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEpochInfo not implemented") } func RegisterQueryServer(s grpc1.Server, srv QueryServer) { s.RegisterService(&_Query_serviceDesc, srv) } -func _Query_EpochInfos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryEpochsInfoRequest) +func _Query_GetEpochInfos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEpochInfosRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).EpochInfos(ctx, in) + return srv.(QueryServer).GetEpochInfos(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/client.x.epochs.types.Query/EpochInfos", + FullMethod: "/client.x.epochs.types.Query/GetEpochInfos", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).EpochInfos(ctx, req.(*QueryEpochsInfoRequest)) + return srv.(QueryServer).GetEpochInfos(ctx, req.(*GetEpochInfosRequest)) } return interceptor(ctx, in, info, handler) } -func _Query_CurrentEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryCurrentEpochRequest) +func _Query_GetEpochInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEpochInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(QueryServer).CurrentEpoch(ctx, in) + return srv.(QueryServer).GetEpochInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/client.x.epochs.types.Query/CurrentEpoch", + FullMethod: "/client.x.epochs.types.Query/GetEpochInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(QueryServer).CurrentEpoch(ctx, req.(*QueryCurrentEpochRequest)) + return srv.(QueryServer).GetEpochInfo(ctx, req.(*GetEpochInfoRequest)) } return interceptor(ctx, in, info, handler) } @@ -341,19 +340,19 @@ var _Query_serviceDesc = grpc.ServiceDesc{ HandlerType: (*QueryServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "EpochInfos", - Handler: _Query_EpochInfos_Handler, + MethodName: "GetEpochInfos", + Handler: _Query_GetEpochInfos_Handler, }, { - MethodName: "CurrentEpoch", - Handler: _Query_CurrentEpoch_Handler, + MethodName: "GetEpochInfo", + Handler: _Query_GetEpochInfo_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "client/x/epochs/types/query.proto", } -func (m *QueryEpochsInfoRequest) Marshal() (dAtA []byte, err error) { +func (m *GetEpochInfosRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -363,12 +362,12 @@ func (m *QueryEpochsInfoRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryEpochsInfoRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetEpochInfosRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetEpochInfosRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -376,7 +375,7 @@ func (m *QueryEpochsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *QueryEpochsInfoResponse) Marshal() (dAtA []byte, err error) { +func (m *GetEpochInfosResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -386,12 +385,12 @@ func (m *QueryEpochsInfoResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryEpochsInfoResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetEpochInfosResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetEpochInfosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -413,7 +412,7 @@ func (m *QueryEpochsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { +func (m *GetEpochInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -423,12 +422,12 @@ func (m *QueryCurrentEpochRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryCurrentEpochRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetEpochInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryCurrentEpochRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetEpochInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -443,7 +442,7 @@ func (m *QueryCurrentEpochRequest) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *QueryCurrentEpochResponse) Marshal() (dAtA []byte, err error) { +func (m *GetEpochInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ 
-453,21 +452,26 @@ func (m *QueryCurrentEpochResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryCurrentEpochResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetEpochInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryCurrentEpochResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetEpochInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.CurrentEpoch != 0 { - i = encodeVarintQuery(dAtA, i, uint64(m.CurrentEpoch)) - i-- - dAtA[i] = 0x8 + { + size, err := m.Epoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -482,7 +486,7 @@ func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *QueryEpochsInfoRequest) Size() (n int) { +func (m *GetEpochInfosRequest) Size() (n int) { if m == nil { return 0 } @@ -491,7 +495,7 @@ func (m *QueryEpochsInfoRequest) Size() (n int) { return n } -func (m *QueryEpochsInfoResponse) Size() (n int) { +func (m *GetEpochInfosResponse) Size() (n int) { if m == nil { return 0 } @@ -506,7 +510,7 @@ func (m *QueryEpochsInfoResponse) Size() (n int) { return n } -func (m *QueryCurrentEpochRequest) Size() (n int) { +func (m *GetEpochInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -519,15 +523,14 @@ func (m *QueryCurrentEpochRequest) Size() (n int) { return n } -func (m *QueryCurrentEpochResponse) Size() (n int) { +func (m *GetEpochInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.CurrentEpoch != 0 { - n += 1 + sovQuery(uint64(m.CurrentEpoch)) - } + l = m.Epoch.Size() + n += 1 + l + sovQuery(uint64(l)) return n } @@ -537,7 +540,7 @@ func sovQuery(x uint64) (n int) { func sozQuery(x uint64) (n int) { return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { +func (m *GetEpochInfosRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -560,10 +563,10 @@ func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryEpochsInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetEpochInfosRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryEpochsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetEpochInfosRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -587,7 +590,7 @@ func (m *QueryEpochsInfoRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { +func (m *GetEpochInfosResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -610,10 +613,10 @@ func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryEpochsInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetEpochInfosResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryEpochsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
GetEpochInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -671,7 +674,7 @@ func (m *QueryEpochsInfoResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { +func (m *GetEpochInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -694,10 +697,10 @@ func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryCurrentEpochRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetEpochInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCurrentEpochRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetEpochInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -753,7 +756,7 @@ func (m *QueryCurrentEpochRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryCurrentEpochResponse) Unmarshal(dAtA []byte) error { +func (m *GetEpochInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -776,17 +779,17 @@ func (m *QueryCurrentEpochResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryCurrentEpochResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetEpochInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCurrentEpochResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetEpochInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) } - m.CurrentEpoch = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowQuery @@ -796,11 +799,25 @@ func (m *QueryCurrentEpochResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentEpoch |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Epoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/client/x/epochs/types/query.proto b/client/x/epochs/types/query.proto index fd4f5916..40be331e 100644 --- a/client/x/epochs/types/query.proto +++ b/client/x/epochs/types/query.proto @@ -9,24 +9,24 @@ option go_package = "client/x/epochs/types"; // Query defines the gRPC querier service. 
service Query { - // EpochInfos provide running epochInfos - rpc EpochInfos(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) { + // GetEpochInfos provide running epochInfos + rpc GetEpochInfos(GetEpochInfosRequest) returns (GetEpochInfosResponse) { option (google.api.http).get = "/client/epochs/v1/epochs"; } - // CurrentEpoch provide current epoch of specified identifier - rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) { - option (google.api.http).get = "/client/epochs/v1/current_epoch"; + // GetEpochInfo provide epochInfo of specified identifier + rpc GetEpochInfo(GetEpochInfoRequest) returns (GetEpochInfoResponse) { + option (google.api.http).get = "/client/epochs/v1/epoch"; } } -message QueryEpochsInfoRequest {} -message QueryEpochsInfoResponse { +message GetEpochInfosRequest {} +message GetEpochInfosResponse { repeated EpochInfo epochs = 1 [(gogoproto.nullable) = false]; } -message QueryCurrentEpochRequest { +message GetEpochInfoRequest { string identifier = 1; } -message QueryCurrentEpochResponse { - int64 current_epoch = 1; +message GetEpochInfoResponse { + EpochInfo epoch = 1 [(gogoproto.nullable) = false]; } \ No newline at end of file From 9e64242a15bf9baf64335b1822955c3ac2ed3f74 Mon Sep 17 00:00:00 2001 From: Zerui Ge Date: Wed, 9 Oct 2024 15:02:52 -0700 Subject: [PATCH 29/29] feat(mint): update mint param type (#182) * feat(mint): update param type * feat(mint): make param value consistent among example and tests --- client/x/mint/README.md | 16 +++--- client/x/mint/keeper/genesis_test.go | 3 +- client/x/mint/types/genesis.go | 2 +- client/x/mint/types/mint.pb.go | 80 +++++++++++++++++----------- client/x/mint/types/mint.proto | 8 ++- client/x/mint/types/params.go | 4 +- 6 files changed, 72 insertions(+), 41 deletions(-) diff --git a/client/x/mint/README.md b/client/x/mint/README.md index 600267c3..33171862 100644 --- a/client/x/mint/README.md +++ b/client/x/mint/README.md @@ -32,7 +32,11 @@ message Params { // type of coin to mint string mint_denom = 1; // inflation amount per year - uint64 inflations_per_year = 2; + string inflations_per_year = 2 [ + (cosmos_proto.scalar) = "cosmos.Dec", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false + ]; // expected blocks per year uint64 blocks_per_year = ; } @@ -58,11 +62,11 @@ type InflationCalculationFn func(ctx sdk.Context, minter Minter, params Params, The minting module contains the following parameters: -| Key | Type | Example | -|---------------------|-----------------|------------------------------| -| MintDenom | string | "stake" | -| InflationsPerYear | string (dec) | "24625000000000000" | -| BlocksPerYear | string (uint64) | "6311520" | +| Key | Type | Example | +|---------------------|-----------------|----------------------------------------| +| MintDenom | string | "stake" | +| InflationsPerYear | string (dec) | "24625000000000000.000000000000000000" | +| BlocksPerYear | string (uint64) | "6311520" | ## Events diff --git a/client/x/mint/keeper/genesis_test.go b/client/x/mint/keeper/genesis_test.go index affa5818..f9304eb5 100644 --- a/client/x/mint/keeper/genesis_test.go +++ b/client/x/mint/keeper/genesis_test.go @@ -4,6 +4,7 @@ package keeper_test import ( "testing" + "cosmossdk.io/math" storetypes "cosmossdk.io/store/types" "github.com/cosmos/cosmos-sdk/codec" @@ -62,7 +63,7 @@ func (s *GenesisTestSuite) TestImportExportGenesis() { genesisState := types.DefaultGenesisState() genesisState.Params = types.NewParams( "testDenom", - 
24625000_000_000_000, + math.LegacyNewDec(24625000000000000.000000000000000000), uint64(60*60*8766/5), ) diff --git a/client/x/mint/types/genesis.go b/client/x/mint/types/genesis.go index ff80226e..656061ae 100644 --- a/client/x/mint/types/genesis.go +++ b/client/x/mint/types/genesis.go @@ -15,7 +15,7 @@ type InflationCalculationFn func(ctx context.Context, params Params, bondedRatio // DefaultInflationCalculationFn is the default function used to calculate inflation. func DefaultInflationCalculationFn(_ context.Context, params Params, _ math.LegacyDec) math.LegacyDec { - return math.LegacyNewDec(int64(params.InflationsPerYear)).Quo(math.LegacyNewDec(int64(params.BlocksPerYear))) + return params.InflationsPerYear.Quo(math.LegacyNewDec(int64(params.BlocksPerYear))) } // NewGenesisState creates a new GenesisState object. diff --git a/client/x/mint/types/mint.pb.go b/client/x/mint/types/mint.pb.go index 59f0e424..30133b79 100644 --- a/client/x/mint/types/mint.pb.go +++ b/client/x/mint/types/mint.pb.go @@ -4,8 +4,11 @@ package types import ( + cosmossdk_io_math "cosmossdk.io/math" fmt "fmt" + _ "github.com/cosmos/cosmos-proto" _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" math "math" @@ -28,7 +31,7 @@ type Params struct { // type of coin to mint MintDenom string `protobuf:"bytes,1,opt,name=mint_denom,json=mintDenom,proto3" json:"mint_denom,omitempty"` // inflation amount per year - InflationsPerYear uint64 `protobuf:"varint,2,opt,name=inflations_per_year,json=inflationsPerYear,proto3" json:"inflations_per_year,omitempty"` + InflationsPerYear cosmossdk_io_math.LegacyDec `protobuf:"bytes,2,opt,name=inflations_per_year,json=inflationsPerYear,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"inflations_per_year"` // expected blocks per year BlocksPerYear uint64 `protobuf:"varint,3,opt,name=blocks_per_year,json=blocksPerYear,proto3" json:"blocks_per_year,omitempty"` } @@ -73,13 +76,6 @@ func (m *Params) GetMintDenom() string { return "" } -func (m *Params) GetInflationsPerYear() uint64 { - if m != nil { - return m.InflationsPerYear - } - return 0 -} - func (m *Params) GetBlocksPerYear() uint64 { if m != nil { return m.BlocksPerYear @@ -94,21 +90,26 @@ func init() { func init() { proto.RegisterFile("client/x/mint/types/mint.proto", fileDescriptor_9c6e60aec58f52af) } var fileDescriptor_9c6e60aec58f52af = []byte{ - // 219 bytes of a gzipped FileDescriptorProto + // 290 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0xd1, 0xaf, 0xd0, 0xcf, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x06, 0x33, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x84, 0x21, 0xf2, 0x7a, 0x15, 0x7a, 0x60, 0x41, - 0xb0, 0xbc, 0x94, 0x60, 0x62, 0x6e, 0x66, 0x5e, 0xbe, 0x3e, 0x98, 0x84, 0xa8, 0x53, 0x9a, 0xc5, - 0xc8, 0xc5, 0x16, 0x90, 0x58, 0x94, 0x98, 0x5b, 0x2c, 0x24, 0xcb, 0xc5, 0x05, 0x52, 0x1b, 0x9f, - 0x92, 0x9a, 0x97, 0x9f, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0xc4, 0x09, 0x12, 0x71, 0x01, - 0x09, 0x08, 0xe9, 0x71, 0x09, 0x67, 0xe6, 0xa5, 0xe5, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0xc7, - 0x17, 0xa4, 0x16, 0xc5, 0x57, 0xa6, 0x26, 0x16, 0x49, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x04, 0x09, - 0x22, 0xa4, 0x02, 0x52, 0x8b, 0x22, 0x53, 0x13, 0x8b, 0x84, 0xd4, 0xb8, 0xf8, 0x93, 0x72, 0xf2, - 0x93, 0xb3, 0x91, 0xd4, 0x32, 0x83, 0xd5, 0xf2, 0x42, 0x84, 0xa1, 0xea, 0xac, 0x24, 0xbb, 0x9e, - 0x6f, 0xd0, 0x12, 0x41, 0xf5, 0x0e, 
0xc4, 0x45, 0x4e, 0xba, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, - 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, - 0x78, 0x2c, 0xc7, 0x10, 0x25, 0x8c, 0xc5, 0xfb, 0x49, 0x6c, 0x60, 0x2f, 0x19, 0x03, 0x02, 0x00, - 0x00, 0xff, 0xff, 0xd9, 0x4d, 0x19, 0x4f, 0x1c, 0x01, 0x00, 0x00, + 0xb0, 0xbc, 0x94, 0x60, 0x62, 0x6e, 0x66, 0x5e, 0xbe, 0x3e, 0x98, 0x84, 0xa8, 0x93, 0x92, 0x4c, + 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0x8e, 0x07, 0xf3, 0xf4, 0x21, 0x1c, 0xa8, 0x94, 0x48, 0x7a, 0x7e, + 0x7a, 0x3e, 0x44, 0x1c, 0xc4, 0x82, 0x88, 0x2a, 0x9d, 0x65, 0xe4, 0x62, 0x0b, 0x48, 0x2c, 0x4a, + 0xcc, 0x2d, 0x16, 0x92, 0xe5, 0xe2, 0x02, 0x19, 0x1e, 0x9f, 0x92, 0x9a, 0x97, 0x9f, 0x2b, 0xc1, + 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0xc4, 0x09, 0x12, 0x71, 0x01, 0x09, 0x08, 0x25, 0x72, 0x09, 0x67, + 0xe6, 0xa5, 0xe5, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0xc7, 0x17, 0xa4, 0x16, 0xc5, 0x57, 0xa6, + 0x26, 0x16, 0x49, 0x30, 0x81, 0xd4, 0x39, 0x19, 0x9e, 0xb8, 0x27, 0xcf, 0x70, 0xeb, 0x9e, 0xbc, + 0x34, 0xc4, 0xca, 0xe2, 0x94, 0x6c, 0xbd, 0xcc, 0x7c, 0xfd, 0xdc, 0xc4, 0x92, 0x0c, 0x3d, 0x9f, + 0xd4, 0xf4, 0xc4, 0xe4, 0x4a, 0x97, 0xd4, 0xe4, 0x4b, 0x5b, 0x74, 0xb9, 0xa0, 0x2e, 0x72, 0x49, + 0x4d, 0x0e, 0x12, 0x44, 0x98, 0x16, 0x90, 0x5a, 0x14, 0x99, 0x9a, 0x58, 0x24, 0xa4, 0xc6, 0xc5, + 0x9f, 0x94, 0x93, 0x9f, 0x9c, 0x8d, 0x64, 0x3c, 0xb3, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x2f, 0x44, + 0x18, 0xaa, 0xce, 0x4a, 0xb2, 0xeb, 0xf9, 0x06, 0x2d, 0x11, 0xd4, 0x20, 0x83, 0x78, 0xc2, 0x49, + 0xf7, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, + 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x84, 0xb1, 0x04, 0x71, 0x12, + 0x1b, 0x38, 0x14, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x30, 0x59, 0xae, 0xed, 0x80, 0x01, + 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -136,11 +137,16 @@ func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - if m.InflationsPerYear != 0 { - i = encodeVarintMint(dAtA, i, uint64(m.InflationsPerYear)) - i-- - dAtA[i] = 0x10 + { + size := m.InflationsPerYear.Size() + i -= size + if _, err := m.InflationsPerYear.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMint(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 if len(m.MintDenom) > 0 { i -= len(m.MintDenom) copy(dAtA[i:], m.MintDenom) @@ -172,9 +178,8 @@ func (m *Params) Size() (n int) { if l > 0 { n += 1 + l + sovMint(uint64(l)) } - if m.InflationsPerYear != 0 { - n += 1 + sovMint(uint64(m.InflationsPerYear)) - } + l = m.InflationsPerYear.Size() + n += 1 + l + sovMint(uint64(l)) if m.BlocksPerYear != 0 { n += 1 + sovMint(uint64(m.BlocksPerYear)) } @@ -249,10 +254,10 @@ func (m *Params) Unmarshal(dAtA []byte) error { m.MintDenom = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { + if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field InflationsPerYear", wireType) } - m.InflationsPerYear = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMint @@ -262,11 +267,26 @@ func (m *Params) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InflationsPerYear |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.InflationsPerYear.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BlocksPerYear", wireType) diff --git a/client/x/mint/types/mint.proto b/client/x/mint/types/mint.proto index 509ccd8f..144a51ea 100644 --- a/client/x/mint/types/mint.proto +++ b/client/x/mint/types/mint.proto @@ -4,6 +4,8 @@ package client.x.mint.types; option go_package = "client/x/mint/types"; import "amino/amino.proto"; +import "cosmos_proto/cosmos.proto"; +import "gogoproto/gogo.proto"; // Params defines the parameters for the x/mint module. message Params { @@ -12,7 +14,11 @@ message Params { // type of coin to mint string mint_denom = 1; // inflation amount per year - uint64 inflations_per_year = 2; + string inflations_per_year = 2 [ + (cosmos_proto.scalar) = "cosmos.Dec", + (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec", + (gogoproto.nullable) = false + ]; // expected blocks per year uint64 blocks_per_year = 3; } diff --git a/client/x/mint/types/params.go b/client/x/mint/types/params.go index cc5197a3..20ac574c 100644 --- a/client/x/mint/types/params.go +++ b/client/x/mint/types/params.go @@ -13,7 +13,7 @@ import ( ) // NewParams returns Params instance with the given values. -func NewParams(mintDenom string, inflationsPerYear uint64, blocksPerYear uint64) Params { +func NewParams(mintDenom string, inflationsPerYear math.LegacyDec, blocksPerYear uint64) Params { return Params{ MintDenom: mintDenom, InflationsPerYear: inflationsPerYear, @@ -25,7 +25,7 @@ func NewParams(mintDenom string, inflationsPerYear uint64, blocksPerYear uint64) func DefaultParams() Params { return Params{ MintDenom: sdk.DefaultBondDenom, - InflationsPerYear: 24625000_000_000_000, + InflationsPerYear: math.LegacyNewDec(24625000000000000.000000000000000000), BlocksPerYear: uint64(60 * 60 * 8766 / 5), // assuming 5 second block times } }
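
A minimal standalone sketch (not part of the patch) of what the mint change buys: the annual inflation parameter is now a LegacyDec, so it divides directly by the expected blocks per year, mirroring the updated DefaultInflationCalculationFn in client/x/mint/types/genesis.go. The values are the defaults shown in DefaultParams above; only cosmossdk.io/math is needed.

package main

import (
	"fmt"

	"cosmossdk.io/math"
)

func main() {
	// Defaults from this patch: 24,625,000,000,000,000 per year and
	// 6,311,520 expected blocks per year (8766 hours at 5-second blocks).
	inflationsPerYear := math.LegacyNewDec(24_625_000_000_000_000)
	blocksPerYear := uint64(60 * 60 * 8766 / 5)

	// Mirrors the updated DefaultInflationCalculationFn: the LegacyDec annual
	// amount divided by the expected blocks per year.
	perBlock := inflationsPerYear.Quo(math.LegacyNewDec(int64(blocksPerYear)))

	fmt.Println("blocks per year:", blocksPerYear)          // 6311520
	fmt.Println("inflation per block:", perBlock.String()) // about 3.9e9, printed with 18 decimal places
}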
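
And a client-side sketch of the renamed epochs query surface from the first patch: GetEpochInfos replaces EpochInfos, and GetEpochInfo replaces CurrentEpoch, now returning the full EpochInfo instead of a bare int64. This is an illustration under stated assumptions: the endpoint address and the "day" identifier are placeholders, and the import path is taken from the proto's go_package option, so the repository's real module path may differ. The REST equivalents per the google.api.http options are GET /client/epochs/v1/epochs and GET /client/epochs/v1/epoch.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Import path taken from the go_package option; adjust to the module's real path.
	epochstypes "client/x/epochs/types"
)

func main() {
	// Hypothetical node gRPC endpoint.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	q := epochstypes.NewQueryClient(conn)

	// Formerly EpochInfos: list every running epoch.
	all, err := q.GetEpochInfos(context.Background(), &epochstypes.GetEpochInfosRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range all.Epochs {
		fmt.Println(e.Identifier, e.CurrentEpoch)
	}

	// Formerly CurrentEpoch: the response now carries the whole EpochInfo,
	// not just the current epoch number.
	one, err := q.GetEpochInfo(context.Background(), &epochstypes.GetEpochInfoRequest{Identifier: "day"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(one.Epoch.CurrentEpoch)
}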