diff --git a/.changelog/config.toml b/.changelog/config.toml deleted file mode 100644 index 818370b9..00000000 --- a/.changelog/config.toml +++ /dev/null @@ -1,20 +0,0 @@ -project_url = "https://github.com/cosmos/gaia" - -# Settings related to components/sub-modules. Only relevant if you make use of -# components/sub-modules. -[components] - -# The title to use for the section of entries not relating to a specific -# component. -general_entries_title = "General" - -# The number of spaces to inject before each component-related entry. -entry_indent = 2 - - # The components themselves. Each component has a name (used when rendered - # to Markdown) and a path relative to the project folder (i.e. relative to - # the parent of the `.changelog` folder). - [components.all] - globalfee = { name = "GlobalFee", path = "x/globalfee" } - tests = { name = "Tests", path = "tests" } - docs = { name = "Documentation", path = "docs" } \ No newline at end of file diff --git a/.changelog/epilogue.md b/.changelog/epilogue.md deleted file mode 100644 index c15376c0..00000000 --- a/.changelog/epilogue.md +++ /dev/null @@ -1,3 +0,0 @@ -## Previous Versions - -[CHANGELOG of previous versions](https://github.com/cosmos/gaia/blob/main/CHANGELOG.md) \ No newline at end of file diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/.changelog/v15.0.0/api-breaking/2912-vote-spam.md b/.changelog/v15.0.0/api-breaking/2912-vote-spam.md deleted file mode 100644 index 7de12dbd..00000000 --- a/.changelog/v15.0.0/api-breaking/2912-vote-spam.md +++ /dev/null @@ -1,2 +0,0 @@ -- Reject `MsgVote` messages from accounts with less than 1 atom staked. - ([\#2912](https://github.com/cosmos/gaia/pull/2912)) \ No newline at end of file diff --git a/.changelog/v15.0.0/api-breaking/2967-bump-ics.md b/.changelog/v15.0.0/api-breaking/2967-bump-ics.md deleted file mode 100644 index f2c86b36..00000000 --- a/.changelog/v15.0.0/api-breaking/2967-bump-ics.md +++ /dev/null @@ -1,4 +0,0 @@ -- The consumer CCV genesis state obtained from the provider chain needs to be - transformed to be compatible with older versions of consumer chains - (see [ICS docs](https://cosmos.github.io/interchain-security/consumer-development/consumer-genesis-transformation)). - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) diff --git a/.changelog/v15.0.0/api-breaking/2967-bump-sdk.md b/.changelog/v15.0.0/api-breaking/2967-bump-sdk.md deleted file mode 100644 index 101d4ed8..00000000 --- a/.changelog/v15.0.0/api-breaking/2967-bump-sdk.md +++ /dev/null @@ -1,8 +0,0 @@ -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - As compared to [v0.47.10](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10), - this special branch of cosmos-sdk has the following API-breaking changes: - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms (e.g., `uatom` for Cosmos Hub). ([sdk-#19302](https://github.com/cosmos/cosmos-sdk/pull/19302)) - - Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.01`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted. ([sdk-#19312](https://github.com/cosmos/cosmos-sdk/pull/19312)) - - Disable the `DenomOwners` query. 
([sdk-#19266](https://github.com/cosmos/cosmos-sdk/pull/19266)) \ No newline at end of file diff --git a/.changelog/v15.0.0/bug-fixes/2912-vote-spam.md b/.changelog/v15.0.0/bug-fixes/2912-vote-spam.md deleted file mode 100644 index aebfdfa0..00000000 --- a/.changelog/v15.0.0/bug-fixes/2912-vote-spam.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add ante handler that only allows `MsgVote` messages from accounts with at least - 1 atom staked. ([\#2912](https://github.com/cosmos/gaia/pull/2912)) \ No newline at end of file diff --git a/.changelog/v15.0.0/bug-fixes/2967-bump-sdk.md b/.changelog/v15.0.0/bug-fixes/2967-bump-sdk.md deleted file mode 100644 index b81fb959..00000000 --- a/.changelog/v15.0.0/bug-fixes/2967-bump-sdk.md +++ /dev/null @@ -1,12 +0,0 @@ -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This special branch of cosmos-sdk backports a series of fixes for issues found - during the [Oak Security audit of SDK 0.47](https://github.com/oak-security/audit-reports/blob/master/Cosmos%20SDK/2024-01-23%20Audit%20Report%20-%20Cosmos%20SDK%20v1.0.pdf). - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Backport [sdk-#18146](https://github.com/cosmos/cosmos-sdk/pull/18146): Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.01`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted. ([sdk-#19312](https://github.com/cosmos/cosmos-sdk/pull/19312)) - - Partially backport [sdk-#18047](https://github.com/cosmos/cosmos-sdk/pull/18047): Add a limit of 200 grants pruned per `EndBlock` in the feegrant module. ([sdk-#19314](https://github.com/cosmos/cosmos-sdk/pull/19314)) - - Partially backport [skd-#18737](https://github.com/cosmos/cosmos-sdk/pull/18737): Add a limit of 200 grants pruned per `BeginBlock` in the authz module. ([sdk-#19315](https://github.com/cosmos/cosmos-sdk/pull/19315)) - - Backport [sdk-#18173](https://github.com/cosmos/cosmos-sdk/pull/18173): Gov Hooks now returns error and are "blocking" if they fail. Expect for `AfterProposalFailedMinDeposit` and `AfterProposalVotingPeriodEnded` that will log the error and continue. ([sdk-#19305](https://github.com/cosmos/cosmos-sdk/pull/19305)) - - Backport [sdk-#18189](https://github.com/cosmos/cosmos-sdk/pull/18189): Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms. ([sdk-#19302](https://github.com/cosmos/cosmos-sdk/pull/19302)) - - Backport [sdk-#18214](https://github.com/cosmos/cosmos-sdk/pull/18214) and [sdk-#17352](https://github.com/cosmos/cosmos-sdk/pull/17352): Ensure that modifying the argument to `NewUIntFromBigInt` and `NewIntFromBigInt` doesn't mutate the returned value. 
([sdk-#19293](https://github.com/cosmos/cosmos-sdk/pull/19293)) - \ No newline at end of file diff --git a/.changelog/v15.0.0/dependencies/2852-bump-comet.md b/.changelog/v15.0.0/dependencies/2852-bump-comet.md deleted file mode 100644 index d8c66a99..00000000 --- a/.changelog/v15.0.0/dependencies/2852-bump-comet.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [CometBFT](https://github.com/cometbft/cometbft) - to [v0.37.4](https://github.com/cometbft/cometbft/releases/tag/v0.37.4) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/dependencies/2852-bump-ibc.md b/.changelog/v15.0.0/dependencies/2852-bump-ibc.md deleted file mode 100644 index 3d882274..00000000 --- a/.changelog/v15.0.0/dependencies/2852-bump-ibc.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [ibc-go](https://github.com/cosmos/ibc-go) to - [v7.3.1](https://github.com/cosmos/ibc-go/releases/tag/v7.3.1) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/dependencies/2852-bump-pfm.md b/.changelog/v15.0.0/dependencies/2852-bump-pfm.md deleted file mode 100644 index d7b41074..00000000 --- a/.changelog/v15.0.0/dependencies/2852-bump-pfm.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) - to [v7.1.2](https://github.com/cosmos/ibc-apps/releases/tag/middleware%2Fpacket-forward-middleware%2Fv7.1.2) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/dependencies/2967-bump-ics.md b/.changelog/v15.0.0/dependencies/2967-bump-ics.md deleted file mode 100644 index e471b325..00000000 --- a/.changelog/v15.0.0/dependencies/2967-bump-ics.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [ICS](https://github.com/cosmos/interchain-security) to - [v3.3.3-lsm](https://github.com/cosmos/interchain-security/releases/tag/v3.3.3-lsm) - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) \ No newline at end of file diff --git a/.changelog/v15.0.0/dependencies/2967-bump-sdk.md b/.changelog/v15.0.0/dependencies/2967-bump-sdk.md deleted file mode 100644 index 0cf38025..00000000 --- a/.changelog/v15.0.0/dependencies/2967-bump-sdk.md +++ /dev/null @@ -1,4 +0,0 @@ -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This is a special cosmos-sdk branch with support for both ICS and LSM. - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) \ No newline at end of file diff --git a/.changelog/v15.0.0/features/2960-add-metaprotocols-support.md b/.changelog/v15.0.0/features/2960-add-metaprotocols-support.md deleted file mode 100644 index 84b5a12f..00000000 --- a/.changelog/v15.0.0/features/2960-add-metaprotocols-support.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add support for metaprotocols using Tx extension options. 
- ([\#2960](https://github.com/cosmos/gaia/pull/2960)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2852-bump-comet.md b/.changelog/v15.0.0/state-breaking/2852-bump-comet.md deleted file mode 100644 index d8c66a99..00000000 --- a/.changelog/v15.0.0/state-breaking/2852-bump-comet.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [CometBFT](https://github.com/cometbft/cometbft) - to [v0.37.4](https://github.com/cometbft/cometbft/releases/tag/v0.37.4) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2852-bump-ibc.md b/.changelog/v15.0.0/state-breaking/2852-bump-ibc.md deleted file mode 100644 index 3d882274..00000000 --- a/.changelog/v15.0.0/state-breaking/2852-bump-ibc.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [ibc-go](https://github.com/cosmos/ibc-go) to - [v7.3.1](https://github.com/cosmos/ibc-go/releases/tag/v7.3.1) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2852-bump-pfm.md b/.changelog/v15.0.0/state-breaking/2852-bump-pfm.md deleted file mode 100644 index d7b41074..00000000 --- a/.changelog/v15.0.0/state-breaking/2852-bump-pfm.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) - to [v7.1.2](https://github.com/cosmos/ibc-apps/releases/tag/middleware%2Fpacket-forward-middleware%2Fv7.1.2) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2855-migrate-min-commission-rate.md b/.changelog/v15.0.0/state-breaking/2855-migrate-min-commission-rate.md deleted file mode 100644 index 64e5348b..00000000 --- a/.changelog/v15.0.0/state-breaking/2855-migrate-min-commission-rate.md +++ /dev/null @@ -1,7 +0,0 @@ -- Set min commission rate staking parameter to `5%` - ([prop 826](https://www.mintscan.io/cosmos/proposals/826)) - and update the commission rate for all validators that have a commission - rate less than `5%`. ([\#2855](https://github.com/cosmos/gaia/pull/2855)) - - - diff --git a/.changelog/v15.0.0/state-breaking/2866-migrate-signing-infos.md b/.changelog/v15.0.0/state-breaking/2866-migrate-signing-infos.md deleted file mode 100644 index f50ee77d..00000000 --- a/.changelog/v15.0.0/state-breaking/2866-migrate-signing-infos.md +++ /dev/null @@ -1,5 +0,0 @@ -- Migrate the signing infos of validators for which the consensus address is missing. -([\#2886](https://github.com/cosmos/gaia/pull/2886)) - - - diff --git a/.changelog/v15.0.0/state-breaking/2891-migrate-vesting-funds.md b/.changelog/v15.0.0/state-breaking/2891-migrate-vesting-funds.md deleted file mode 100644 index b1311529..00000000 --- a/.changelog/v15.0.0/state-breaking/2891-migrate-vesting-funds.md +++ /dev/null @@ -1,3 +0,0 @@ -- Migrate vesting funds from "cosmos145hytrc49m0hn6fphp8d5h4xspwkawcuzmx498" - to community pool according to signal prop [860](https://www.mintscan.io/cosmos/proposals/860). - ([\#2891](https://github.com/cosmos/gaia/pull/2891)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2912-vote-spam.md b/.changelog/v15.0.0/state-breaking/2912-vote-spam.md deleted file mode 100644 index aebfdfa0..00000000 --- a/.changelog/v15.0.0/state-breaking/2912-vote-spam.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add ante handler that only allows `MsgVote` messages from accounts with at least - 1 atom staked. 
([\#2912](https://github.com/cosmos/gaia/pull/2912)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2913-gov-spam.md b/.changelog/v15.0.0/state-breaking/2913-gov-spam.md deleted file mode 100644 index 1f80acb0..00000000 --- a/.changelog/v15.0.0/state-breaking/2913-gov-spam.md +++ /dev/null @@ -1,3 +0,0 @@ -- Remove `GovPreventSpamDecorator` and initialize the `MinInitialDepositRatio` gov - param to `10%`. - ([\#2913](https://github.com/cosmos/gaia/pull/2913)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2960-add-metaprotocols-support.md b/.changelog/v15.0.0/state-breaking/2960-add-metaprotocols-support.md deleted file mode 100644 index 84b5a12f..00000000 --- a/.changelog/v15.0.0/state-breaking/2960-add-metaprotocols-support.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add support for metaprotocols using Tx extension options. - ([\#2960](https://github.com/cosmos/gaia/pull/2960)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2967-bump-ics.md b/.changelog/v15.0.0/state-breaking/2967-bump-ics.md deleted file mode 100644 index e471b325..00000000 --- a/.changelog/v15.0.0/state-breaking/2967-bump-ics.md +++ /dev/null @@ -1,3 +0,0 @@ -- Bump [ICS](https://github.com/cosmos/interchain-security) to - [v3.3.3-lsm](https://github.com/cosmos/interchain-security/releases/tag/v3.3.3-lsm) - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) \ No newline at end of file diff --git a/.changelog/v15.0.0/state-breaking/2967-bump-sdk.md b/.changelog/v15.0.0/state-breaking/2967-bump-sdk.md deleted file mode 100644 index ff489c5c..00000000 --- a/.changelog/v15.0.0/state-breaking/2967-bump-sdk.md +++ /dev/null @@ -1,5 +0,0 @@ -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This is a special cosmos-sdk branch with support for both ICS and LSM. - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Skip running `addDenomReverseIndex` in `bank/v3` migration as it is prohibitively expensive to run on the Cosmos Hub. ([sdk-#19266](https://github.com/cosmos/cosmos-sdk/pull/19266)) \ No newline at end of file diff --git a/.changelog/v15.0.0/summary.md b/.changelog/v15.0.0/summary.md deleted file mode 100644 index 259d75b1..00000000 --- a/.changelog/v15.0.0/summary.md +++ /dev/null @@ -1 +0,0 @@ -*February 20, 2024* diff --git a/.changelog/v15.1.0/dependencies/2982-bump-pfm.md b/.changelog/v15.1.0/dependencies/2982-bump-pfm.md deleted file mode 100644 index a93387dc..00000000 --- a/.changelog/v15.1.0/dependencies/2982-bump-pfm.md +++ /dev/null @@ -1,2 +0,0 @@ -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) to `v7.1.3-0.20240228213828-cce7f56d000b`. 
- ([\#2982](https://github.com/cosmos/gaia/pull/2982)) \ No newline at end of file diff --git a/.changelog/v15.1.0/features/2974-add-snapshot-commands.md b/.changelog/v15.1.0/features/2974-add-snapshot-commands.md deleted file mode 100644 index e22dced0..00000000 --- a/.changelog/v15.1.0/features/2974-add-snapshot-commands.md +++ /dev/null @@ -1 +0,0 @@ -- Add gaiad snapshots command set ([\#2974](https://github.com/cosmos/gaia/pull/2974)) diff --git a/.changelog/v15.1.0/state-breaking/2982-bump-pfm.md b/.changelog/v15.1.0/state-breaking/2982-bump-pfm.md deleted file mode 100644 index a93387dc..00000000 --- a/.changelog/v15.1.0/state-breaking/2982-bump-pfm.md +++ /dev/null @@ -1,2 +0,0 @@ -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) to `v7.1.3-0.20240228213828-cce7f56d000b`. - ([\#2982](https://github.com/cosmos/gaia/pull/2982)) \ No newline at end of file diff --git a/.changelog/v15.1.0/state-breaking/2993-migrate-escrow-accounts.md b/.changelog/v15.1.0/state-breaking/2993-migrate-escrow-accounts.md deleted file mode 100644 index d651b2f4..00000000 --- a/.changelog/v15.1.0/state-breaking/2993-migrate-escrow-accounts.md +++ /dev/null @@ -1,3 +0,0 @@ -- Mint and transfer missing assets in escrow accounts - to reach parity with counterparty chain supply. - ([\#2993](https://github.com/cosmos/gaia/pull/2993)) \ No newline at end of file diff --git a/.changelog/v15.1.0/summary.md b/.changelog/v15.1.0/summary.md deleted file mode 100644 index aa56eae6..00000000 --- a/.changelog/v15.1.0/summary.md +++ /dev/null @@ -1 +0,0 @@ -*March 15, 2024* diff --git a/.changelog/v15.2.0/bug-fixes/3025-gov-metatdata-len.md b/.changelog/v15.2.0/bug-fixes/3025-gov-metatdata-len.md deleted file mode 100644 index 4eead990..00000000 --- a/.changelog/v15.2.0/bug-fixes/3025-gov-metatdata-len.md +++ /dev/null @@ -1 +0,0 @@ -- Increase x/gov metadata fields length to 10200 ([\#3025](https://github.com/cosmos/gaia/pull/3025)) diff --git a/.changelog/v15.2.0/bug-fixes/3032-historic-tx-extensions.md b/.changelog/v15.2.0/bug-fixes/3032-historic-tx-extensions.md deleted file mode 100644 index 8d40d4a9..00000000 --- a/.changelog/v15.2.0/bug-fixes/3032-historic-tx-extensions.md +++ /dev/null @@ -1 +0,0 @@ -- Fix parsing of historic Txs with TxExtensionOptions ([\#3032](https://github.com/cosmos/gaia/pull/3032)) \ No newline at end of file diff --git a/.changelog/v15.2.0/state-breaking/3025-gov-metatdata-len.md b/.changelog/v15.2.0/state-breaking/3025-gov-metatdata-len.md deleted file mode 100644 index 4eead990..00000000 --- a/.changelog/v15.2.0/state-breaking/3025-gov-metatdata-len.md +++ /dev/null @@ -1 +0,0 @@ -- Increase x/gov metadata fields length to 10200 ([\#3025](https://github.com/cosmos/gaia/pull/3025)) diff --git a/.changelog/v15.2.0/state-breaking/3032-historic-tx-extensions.md b/.changelog/v15.2.0/state-breaking/3032-historic-tx-extensions.md deleted file mode 100644 index 8d40d4a9..00000000 --- a/.changelog/v15.2.0/state-breaking/3032-historic-tx-extensions.md +++ /dev/null @@ -1 +0,0 @@ -- Fix parsing of historic Txs with TxExtensionOptions ([\#3032](https://github.com/cosmos/gaia/pull/3032)) \ No newline at end of file diff --git a/.changelog/v15.2.0/summary.md b/.changelog/v15.2.0/summary.md deleted file mode 100644 index 4c55d041..00000000 --- a/.changelog/v15.2.0/summary.md +++ /dev/null @@ -1,2 +0,0 @@ -*March 29, 2024* - diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e13221c5..39fc27f1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1,5 @@ # 
CODEOWNERS: https://help.github.com/articles/about-codeowners/ # Primary repo maintainers -* @alexanderbez @zmanian @crodriguezvega @jackzampolin @cosmos/informal_gaia_maintain +* @giunatale @tbruyelle @jaekwon @adam-hanna diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 6749f710..85d7f176 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -16,7 +16,7 @@ v Please also ensure that this is not a duplicate issue :) ## Version - + ## Steps to Reproduce diff --git a/.github/ISSUE_TEMPLATE/epic-template.md b/.github/ISSUE_TEMPLATE/epic-template.md deleted file mode 100644 index 6738bb12..00000000 --- a/.github/ISSUE_TEMPLATE/epic-template.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: EPIC Template -about: Basic template for EPICs (used by the team) -labels: epic, needs-triage ---- - -## Problem - - - -## Closing criteria - - - - -## Problem details - - - -## Task list - -```[tasklist] -### Must have - -``` - -```[tasklist] -### Nice to have - -``` \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 84fc09dc..43777957 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -19,7 +19,7 @@ v without deliberation ## Proposal diff --git a/.github/ISSUE_TEMPLATE/tech-debt.md b/.github/ISSUE_TEMPLATE/tech-debt.md deleted file mode 100644 index 781cd9e0..00000000 --- a/.github/ISSUE_TEMPLATE/tech-debt.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -name: Tech Debt -about: Create an issue to address and reduce technical debt -label: technical-debt, needs-triage - ---- - - - -## Summary - - - -## Type - - - -## Impact - - - -## Proposed Solution - - - -____ - -#### For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned -- [ ] Is a spike necessary to map out how the issue should be approached? 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/upgrade-checklist.md b/.github/ISSUE_TEMPLATE/upgrade-checklist.md deleted file mode 100644 index 1aaef755..00000000 --- a/.github/ISSUE_TEMPLATE/upgrade-checklist.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -name: Cosmos Hub Upgrade Checklist -about: Create a checklist for an upgrade -labels: epic, needs-triage ---- - -## Cosmos Hub Upgrade to Gaia - - - -```[tasklist] -### After Cutting Release Candidate -- [ ] Coordinate with Hypha to test release candidate -- [ ] Create proposal text draft -- [ ] Post proposal text draft on forum -- [ ] Upgrade release and replicated security testnets (note: on Wednesdays) -- [ ] Review post-upgrade status of affected features if necessary -``` - -```[tasklist] -### Before Proposal Submission (TODO sync on a call) -- [ ] Cut final release -- [ ] Predict block height for target date -- [ ] Update/proofread proposal text -- [ ] Transfer deposit amount (i.e., 250 ATOMs) to submitter wallet -- [ ] Create upgrade docs (with disclaimer upgrade prop still being voted on) -- [ ] Coordinate with marketing/comms to prep communication channels/posts -``` - -```[tasklist] -### Voting Period -- [ ] Estimate threshold of validators that are aware of proposal and have voted or confirmed their vote -- [ ] Coordinate with marketing/comms to update on voting progress (and any change in upgrade time) -``` - -```[tasklist] -## Proposal Passed -- [ ] Determine "on-call" team: available on Discord in [#cosmos-hub-validators-verified](https://discord.com/channels/669268347736686612/798937713474142229) during upgrade -- [ ] Coordinate with marketing/comms on who will be available, increase regular upgrade time updates and validator outreach -- [ ] Prep Gaia docs: `docs/getting-started/quickstart.md`, `docs/hub-tutorials/join-mainnet.md`, `docs/migration/` (open PR) -- [ ] Prep chain-registry update: [cosmoshub/chain.json](https://github.com/toschdev/chain-registry/blob/master/cosmoshub/chain.json) (open PR) -- [ ] Prep [cosmos mainnet repo](https://github.com/cosmos/mainnet) update (open PR) -- [ ] Prep internal statesync node for upgrade (confirm cosmovisor configured) -- [ ] Reach out to main dependency teams -- Comet, IBC, SDK -- for assistance during the upgrade (#gaia-release-warroom on Slack) -``` - -```[tasklist] -## During Upgrade (note: on Wednesdays at 15:00 UTC) -- [ ] Available on Discord in [#cosmos-hub-validators-verified](https://discord.com/channels/669268347736686612/798937713474142229) -- [ ] Available on Twitter / Slack / Telegram -``` - -```[tasklist] -## Post Upgrade -- [ ] Merge PRs for Gaia docs & chain-registry update -- [ ] FAQ: collect issues on upgrade from discord -- [ ] Hold validator feedback session -``` diff --git a/.github/PULL_REQUEST_TEMPLATE/docs.md b/.github/PULL_REQUEST_TEMPLATE/docs.md index e80f37ae..84269188 100644 --- a/.github/PULL_REQUEST_TEMPLATE/docs.md +++ b/.github/PULL_REQUEST_TEMPLATE/docs.md @@ -16,7 +16,7 @@ please add links to any relevant follow up issues.* I have... 
- [ ] included the correct `docs:` prefix in the PR title -- [ ] targeted the correct branch (see [PR Targeting](https://github.com/cosmos/gaia/blob/main/CONTRIBUTING.md#pr-targeting)) +- [ ] targeted the correct branch (see [PR Targeting](https://github.com/atomone-hub/atomone/blob/main/CONTRIBUTING.md#pr-targeting)) - [ ] provided a link to the relevant issue or specification - [ ] reviewed "Files changed" and left comments if necessary - [ ] confirmed all CI checks have passed diff --git a/.github/PULL_REQUEST_TEMPLATE/others.md b/.github/PULL_REQUEST_TEMPLATE/others.md index c7d6b7b5..3796dbdb 100644 --- a/.github/PULL_REQUEST_TEMPLATE/others.md +++ b/.github/PULL_REQUEST_TEMPLATE/others.md @@ -15,7 +15,7 @@ please add links to any relevant follow up issues.* I have... - [ ] Included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title -- [ ] Targeted the correct branch (see [PR Targeting](https://github.com/cosmos/gaia/blob/main/CONTRIBUTING.md#pr-targeting)) +- [ ] Targeted the correct branch (see [PR Targeting](https://github.com/atomone-hub/atomone/blob/main/CONTRIBUTING.md#pr-targeting)) - [ ] Provided a link to the relevant issue or specification - [ ] Reviewed "Files changed" and left comments if necessary - [ ] Confirmed all CI checks have passed diff --git a/.github/PULL_REQUEST_TEMPLATE/production.md b/.github/PULL_REQUEST_TEMPLATE/production.md index 79b5534f..349e6cd2 100644 --- a/.github/PULL_REQUEST_TEMPLATE/production.md +++ b/.github/PULL_REQUEST_TEMPLATE/production.md @@ -22,10 +22,10 @@ I have... * [ ] Included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title * [ ] Added `!` to the type prefix if API, client, or state breaking change (i.e., requires minor or major version bump) -* [ ] Targeted the correct branch (see [PR Targeting](https://github.com/cosmos/gaia/blob/main/CONTRIBUTING.md#pr-targeting)) +* [ ] Targeted the correct branch (see [PR Targeting](https://github.com/atomone-hub/atomone/blob/main/CONTRIBUTING.md#pr-targeting)) * [ ] Provided a link to the relevant issue or specification -* [ ] Followed the guidelines for [building SDK modules](https://github.com/cosmos/cosmos-sdk/blob/main/docs/docs/building-modules) -* [ ] Included the necessary unit and integration [tests](https://github.com/cosmos/gaia/blob/main/CONTRIBUTING.md#testing) +* [ ] Followed the guidelines for [building SDK modules](https://github.com/atomone-hub/atomone/blob/main/docs/docs/building-modules) +* [ ] Included the necessary unit and integration [tests](https://github.com/atomone-hub/atomone/blob/main/CONTRIBUTING.md#testing) * [ ] Added a changelog entry in `.changelog` (for details, see [contributing guidelines](../../CONTRIBUTING.md#changelog)) * [ ] Included comments for [documenting Go code](https://blog.golang.org/godoc) * [ ] Updated the relevant documentation or specification diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 1296f84b..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,48 +0,0 @@ -version: 2 -updates: - - package-ecosystem: github-actions - directory: "/" - schedule: - interval: weekly - open-pull-requests-limit: 10 - labels: - - "A:automerge" - - - package-ecosystem: gomod - directory: "/" - schedule: - interval: weekly - open-pull-requests-limit: 10 - labels: - - "A:automerge" - - dependencies - - - package-ecosystem: gomod - directory: "/" - schedule: - interval: daily 
- target-branch: "release/v14.1.x" - # Only allow automated security-related dependency updates on release branches. - open-pull-requests-limit: 0 - labels: - - dependencies - - - package-ecosystem: gomod - directory: "/" - schedule: - interval: daily - target-branch: "release/v13.x" - # Only allow automated security-related dependency updates on release branches. - open-pull-requests-limit: 0 - labels: - - dependencies - - - package-ecosystem: gomod - directory: "/" - schedule: - interval: daily - target-branch: "release/v12.x" - # Only allow automated security-related dependency updates on release branches. - open-pull-requests-limit: 0 - labels: - - dependencies diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml deleted file mode 100644 index 31c4af1b..00000000 --- a/.github/workflows/deploy-docs.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Deploy docs -# This job builds and deploys documenation to github pages. -# It runs on every push to main with a change in the docs folder. -on: - workflow_dispatch: - push: - branches: - - main - # - "release/**" - paths: - - "docs/**" - # - "x/**/*.md" - - .github/workflows/deploy-docs.yml - -permissions: - contents: read - -jobs: - build-and-deploy: - permissions: - contents: write # for JamesIves/github-pages-deploy-action to push changes in repo - runs-on: ubuntu-latest - steps: - - name: Checkout 🛎️ - uses: actions/checkout@v3 - with: - persist-credentials: false - fetch-depth: 0 - path: "." - - - name: Setup Node.js 🔧 - uses: actions/setup-node@v3 - with: - node-version: "16.x" - - # npm install npm should be removed when https://github.com/npm/cli/issues/4942 is fixed - - name: Build 🔧 - run: | - npm install -g npm@8.5.5 - make build-docs - - - name: Deploy 🚀 - uses: JamesIves/github-pages-deploy-action@v4.4.3 - with: - branch: gh-pages - folder: ~/output - single-commit: true \ No newline at end of file diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml index 16ecf0c1..7ea3e929 100644 --- a/.github/workflows/nightly-tests.yml +++ b/.github/workflows/nightly-tests.yml @@ -9,10 +9,10 @@ on: jobs: run-tests: - uses: cosmos/gaia/.github/workflows/test.yml@main + uses: atomone-hub/atomone/.github/workflows/test.yml@main run-simulations: - uses: cosmos/gaia/.github/workflows/sims.yml@main + uses: atomone-hub/atomone/.github/workflows/sims.yml@main run-vulncheck: runs-on: ubuntu-latest @@ -60,4 +60,4 @@ jobs: } } ] - } \ No newline at end of file + } diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 627ca010..b8dd4aa9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -73,7 +73,7 @@ jobs: **/**.go go.mod go.sum - - name: Build Gaia Docker Image + - name: Build AtomOne Docker Image run: make docker-build-debug - name: Build Hermes Docker Image run: make docker-build-hermes @@ -119,7 +119,7 @@ jobs: **/**.go go.mod go.sum - - name: Install Gaia + - name: Install AtomOne run: | make build if: env.GIT_DIFF @@ -131,52 +131,3 @@ jobs: run: | ./contrib/scripts/test_localnet_liveness.sh 100 5 50 localhost if: env.GIT_DIFF - - upgrade-test: - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - uses: technote-space/get-diff-action@v6.0.1 - with: - PATTERNS: | - **/**.go - go.mod - go.sum - - uses: actions/setup-go@v5 - with: - go-version: 1.20.x - # the old gaiad binary version is hardcoded, need to be updated each major release. 
- name: Install Old Gaiad - run: | - git checkout v14.0.0 - make build - cp ./build/gaiad ./build/gaiadold - go clean -modcache - if: env.GIT_DIFF - - name: Install New Gaiad - run: | - git checkout - - make build - cp ./build/gaiad ./build/gaiadnew - go clean -modcache - if: env.GIT_DIFF - - name: Install Cosmovisor - run: | - go install github.com/cosmos/cosmos-sdk/cosmovisor/cmd/cosmovisor@latest - if: env.GIT_DIFF - - name: Start Old Gaiad Binary - run: | - go env GOPATH - ./contrib/scripts/upgrade_test_scripts/run_gaia.sh - if: env.GIT_DIFF - - name: Submit Upgrade Commands - run: | - ./contrib/scripts/upgrade_test_scripts/run_upgrade_commands.sh 15 - if: env.GIT_DIFF - - name: Check for successful upgrade - run: | - ./contrib/scripts/upgrade_test_scripts/test_upgrade.sh 20 5 16 localhost - if: env.GIT_DIFF diff --git a/.gitignore b/.gitignore index 18fa78b6..828aba66 100644 --- a/.gitignore +++ b/.gitignore @@ -24,7 +24,6 @@ docs/node_modules # Data - ideally these don't exist baseapp/data/* client/lcd/keys/* -cmd/gaiacli/statik/statik.go mytestnet # Testing diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index bd5e62ec..00000000 --- a/.gitpod.yml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: - - init: go get && go build ./... && go test ./... && make - command: go run -image: ghcr.io/notional-labs/cosmos diff --git a/.golangci.yml b/.golangci.yml index 6eae92cb..1871e038 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -68,7 +68,7 @@ linters-settings: - prefix(github.com/cosmos) # cosmos org - prefix(cosmossdk.io) # new modules - prefix(github.com/cosmos/cosmos-sdk) # cosmos sdk - - prefix(github.com/cosmos/gaia) # Gaia + - prefix(github.com/atomone-hub/atomone) # AtomOne dogsled: max-blank-identifiers: 3 maligned: diff --git a/.goreleaser.yml b/.goreleaser.yml index be707e42..e09ff4f7 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,13 +1,13 @@ --- -project_name: gaia +project_name: atomone env: - GO111MODULE=on builds: - - main: ./cmd/gaiad - id: "gaiad" - binary: gaiad + - main: ./cmd/atomoned + id: "atomoned" + binary: atomoned mod_timestamp: "{{ .CommitTimestamp }}" flags: - -tags=netgo ledger @@ -16,7 +16,7 @@ builds: - CGO_ENABLED=0 ldflags: # .Env.TM_VERSION is provided in the workflow runner environment -> see .github/workflows/release.yml - - -s -w -X main.commit={{.Commit}} -X main.date={{ .CommitDate }} -X github.com/cosmos/cosmos-sdk/version.Name=gaia -X github.com/cosmos/cosmos-sdk/version.AppName=gaiad -X github.com/cosmos/cosmos-sdk/version.Version=v{{ .Version }} -X github.com/cosmos/cosmos-sdk/version.Commit={{ .Commit }} -X github.com/cosmos/cosmos-sdk/version.BuildTags=netgo,ledger -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Env.TM_VERSION }} + - -s -w -X main.commit={{.Commit}} -X main.date={{ .CommitDate }} -X github.com/cosmos/cosmos-sdk/version.Name=atomone -X github.com/cosmos/cosmos-sdk/version.AppName=atomoned -X github.com/cosmos/cosmos-sdk/version.Version=v{{ .Version }} -X github.com/cosmos/cosmos-sdk/version.Commit={{ .Commit }} -X github.com/cosmos/cosmos-sdk/version.BuildTags=netgo,ledger -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Env.TM_VERSION }} goos: - darwin - linux diff --git a/.mergify.yml b/.mergify.yml index aac2b9a5..ca23c9e8 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -23,30 +23,3 @@ pull_request_rules: commit_message_template: | {{ title }} (#{{ number }}) {{ body }} - - - name: Backport patches to the release/v12.x branch - conditions: - - base=main - - label=A:backport/v12.x -
actions: - backport: - branches: - - release/v12.x - - - name: Backport patches to the release/v13.x branch - conditions: - - base=main - - label=A:backport/v13.x - actions: - backport: - branches: - - release/v13.x - - - name: Backport patches to the release/v14.1.x branch - conditions: - - base=main - - label=A:backport/v14.1.x - actions: - backport: - branches: - - release/v14.1.x \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index bf07e6bf..78ae08fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,139 +1,25 @@ # CHANGELOG -## v15.2.0 +## Unreleased -*March 29, 2024* +*Release date* -### BUG FIXES - -- Increase x/gov metadata fields length to 10200 ([\#3025](https://github.com/cosmos/gaia/pull/3025)) -- Fix parsing of historic Txs with TxExtensionOptions ([\#3032](https://github.com/cosmos/gaia/pull/3032)) - -### STATE BREAKING - -- Increase x/gov metadata fields length to 10200 ([\#3025](https://github.com/cosmos/gaia/pull/3025)) -- Fix parsing of historic Txs with TxExtensionOptions ([\#3032](https://github.com/cosmos/gaia/pull/3032)) - -## v15.1.0 +### API BREAKING -*March 15, 2024* +### BUG FIXES ### DEPENDENCIES -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) to `v7.1.3-0.20240228213828-cce7f56d000b`. - ([\#2982](https://github.com/cosmos/gaia/pull/2982)) - ### FEATURES -- Add gaiad snapshots command set ([\#2974](https://github.com/cosmos/gaia/pull/2974)) - ### STATE BREAKING -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) to `v7.1.3-0.20240228213828-cce7f56d000b`. - ([\#2982](https://github.com/cosmos/gaia/pull/2982)) -- Mint and transfer missing assets in escrow accounts - to reach parity with counterparty chain supply. - ([\#2993](https://github.com/cosmos/gaia/pull/2993)) - -## v15.0.0 +## v1.0.0 -*February 20, 2024* - -### API BREAKING - -- Reject `MsgVote` messages from accounts with less than 1 atom staked. - ([\#2912](https://github.com/cosmos/gaia/pull/2912)) -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - As compared to [v0.47.10](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10), - this special branch of cosmos-sdk has the following API-breaking changes: - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms (e.g., `uatom` for Cosmos Hub). ([sdk-#19302](https://github.com/cosmos/cosmos-sdk/pull/19302)) - - Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.01`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted. ([sdk-#19312](https://github.com/cosmos/cosmos-sdk/pull/19312)) - - Disable the `DenomOwners` query. ([sdk-#19266](https://github.com/cosmos/cosmos-sdk/pull/19266)) -- The consumer CCV genesis state obtained from the provider chain needs to be - transformed to be compatible with older versions of consumer chains - (see [ICS docs](https://cosmos.github.io/interchain-security/consumer-development/consumer-genesis-transformation)). - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) +*Release date* ### BUG FIXES -- Add ante handler that only allows `MsgVote` messages from accounts with at least - 1 atom staked. 
([\#2912](https://github.com/cosmos/gaia/pull/2912)) -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This special branch of cosmos-sdk backports a series of fixes for issues found - during the [Oak Security audit of SDK 0.47](https://github.com/oak-security/audit-reports/blob/master/Cosmos%20SDK/2024-01-23%20Audit%20Report%20-%20Cosmos%20SDK%20v1.0.pdf). - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Backport [sdk-#18146](https://github.com/cosmos/cosmos-sdk/pull/18146): Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.01`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted. ([sdk-#19312](https://github.com/cosmos/cosmos-sdk/pull/19312)) - - Partially backport [sdk-#18047](https://github.com/cosmos/cosmos-sdk/pull/18047): Add a limit of 200 grants pruned per `EndBlock` in the feegrant module. ([sdk-#19314](https://github.com/cosmos/cosmos-sdk/pull/19314)) - - Partially backport [skd-#18737](https://github.com/cosmos/cosmos-sdk/pull/18737): Add a limit of 200 grants pruned per `BeginBlock` in the authz module. ([sdk-#19315](https://github.com/cosmos/cosmos-sdk/pull/19315)) - - Backport [sdk-#18173](https://github.com/cosmos/cosmos-sdk/pull/18173): Gov Hooks now returns error and are "blocking" if they fail. Expect for `AfterProposalFailedMinDeposit` and `AfterProposalVotingPeriodEnded` that will log the error and continue. ([sdk-#19305](https://github.com/cosmos/cosmos-sdk/pull/19305)) - - Backport [sdk-#18189](https://github.com/cosmos/cosmos-sdk/pull/18189): Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms. ([sdk-#19302](https://github.com/cosmos/cosmos-sdk/pull/19302)) - - Backport [sdk-#18214](https://github.com/cosmos/cosmos-sdk/pull/18214) and [sdk-#17352](https://github.com/cosmos/cosmos-sdk/pull/17352): Ensure that modifying the argument to `NewUIntFromBigInt` and `NewIntFromBigInt` doesn't mutate the returned value. ([sdk-#19293](https://github.com/cosmos/cosmos-sdk/pull/19293)) - - ### DEPENDENCIES -- Bump [ibc-go](https://github.com/cosmos/ibc-go) to - [v7.3.1](https://github.com/cosmos/ibc-go/releases/tag/v7.3.1) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) - to [v7.1.2](https://github.com/cosmos/ibc-apps/releases/tag/middleware%2Fpacket-forward-middleware%2Fv7.1.2) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Bump [CometBFT](https://github.com/cometbft/cometbft) - to [v0.37.4](https://github.com/cometbft/cometbft/releases/tag/v0.37.4) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This is a special cosmos-sdk branch with support for both ICS and LSM. - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) -- Bump [ICS](https://github.com/cosmos/interchain-security) to - [v3.3.3-lsm](https://github.com/cosmos/interchain-security/releases/tag/v3.3.3-lsm) - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - ### FEATURES - -- Add support for metaprotocols using Tx extension options. 
- ([\#2960](https://github.com/cosmos/gaia/pull/2960)) - -### STATE BREAKING - -- Bump [ibc-go](https://github.com/cosmos/ibc-go) to - [v7.3.1](https://github.com/cosmos/ibc-go/releases/tag/v7.3.1) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Bump [PFM](https://github.com/cosmos/ibc-apps/tree/main/middleware) - to [v7.1.2](https://github.com/cosmos/ibc-apps/releases/tag/middleware%2Fpacket-forward-middleware%2Fv7.1.2) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Bump [CometBFT](https://github.com/cometbft/cometbft) - to [v0.37.4](https://github.com/cometbft/cometbft/releases/tag/v0.37.4) - ([\#2852](https://github.com/cosmos/gaia/pull/2852)) -- Set min commission rate staking parameter to `5%` - ([prop 826](https://www.mintscan.io/cosmos/proposals/826)) - and update the commission rate for all validators that have a commission - rate less than `5%`. ([\#2855](https://github.com/cosmos/gaia/pull/2855)) -- Migrate the signing infos of validators for which the consensus address is missing. -([\#2886](https://github.com/cosmos/gaia/pull/2886)) -- Migrate vesting funds from "cosmos145hytrc49m0hn6fphp8d5h4xspwkawcuzmx498" - to community pool according to signal prop [860](https://www.mintscan.io/cosmos/proposals/860). - ([\#2891](https://github.com/cosmos/gaia/pull/2891)) -- Add ante handler that only allows `MsgVote` messages from accounts with at least - 1 atom staked. ([\#2912](https://github.com/cosmos/gaia/pull/2912)) -- Remove `GovPreventSpamDecorator` and initialize the `MinInitialDepositRatio` gov - param to `10%`. - ([\#2913](https://github.com/cosmos/gaia/pull/2913)) -- Add support for metaprotocols using Tx extension options. - ([\#2960](https://github.com/cosmos/gaia/pull/2960)) -- Bump [cosmos-sdk](https://github.com/cosmos/cosmos-sdk) to - [v0.47.10-ics-lsm](https://github.com/cosmos/cosmos-sdk/tree/v0.47.10-ics-lsm). - This is a special cosmos-sdk branch with support for both ICS and LSM. - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - - Skip running `addDenomReverseIndex` in `bank/v3` migration as it is prohibitively expensive to run on the Cosmos Hub. ([sdk-#19266](https://github.com/cosmos/cosmos-sdk/pull/19266)) -- Bump [ICS](https://github.com/cosmos/interchain-security) to - [v3.3.3-lsm](https://github.com/cosmos/interchain-security/releases/tag/v3.3.3-lsm) - ([\#2967](https://github.com/cosmos/gaia/pull/2967)) - -## Previous Versions - -[CHANGELOG of previous versions](https://github.com/cosmos/gaia/blob/main/CHANGELOG.md) - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 29a02248..8479e66b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,6 @@ - [Contributing](#contributing) - [Overview](#overview) - - [Responsibilities of the stewarding team](#responsibilities-of-the-stewarding-team) - [Ease of reviewing](#ease-of-reviewing) - [Workflow](#workflow) - [Project Board](#project-board) @@ -13,13 +12,12 @@ - [Pull Request Templates](#pull-request-templates) - [Requesting Reviews](#requesting-reviews) - [Updating Documentation](#updating-documentation) - - [Changelog](#changelog) - [Dependencies](#dependencies) - [Protobuf](#protobuf) - [Branching Model and Release](#branching-model-and-release) - [PR Targeting](#pr-targeting) -Thank you for considering making contributions to Gaia! 🎉👍 +Thank you for considering making contributions to AtomOne! 🎉👍 ## Overview @@ -28,19 +26,9 @@ discussion or proposing code changes. 
Following the processes outlined in this document will lead to the best chance of getting changes merged into the codebase. -### Responsibilities of the stewarding team - -Gaia has many stakeholders contributing and shaping the project. -The _Gaia stewarding team_ is composed of Informal Systems developers and -is responsible for stewarding this project over time. -This means that the stewarding team needs to understand the nature of, -and agree to maintain, all of the changes that land on `main` or a backport branch. -It may cost a few days/weeks' worth of time to _submit_ a particular change, -but _maintaining_ that change over the years has a much higher cost that the stewarding team will bear. - ### Ease of reviewing - The fact that the stewarding team needs to be able to deeply understand the short-, + The fact that the codeowners need to be able to deeply understand the short-, medium- and long-term consequences of incoming changes means that changes need to be **easy to review**. @@ -73,12 +61,12 @@ but _maintaining_ that change over the years has a much higher cost that the ste To ensure a smooth workflow for all contributors, a general procedure for contributing has been established. -1. Start by browsing [existing issues](https://github.com/cosmos/gaia/issues) and [discussions](https://github.com/cosmos/gaia/discussions). If you are looking for something interesting or if you have something in your mind, there is a chance it had been discussed. - * Looking for a good place to start contributing? How about checking out some [good first issues](https://github.com/cosmos/gaia/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) or [bugs](https://github.com/cosmos/gaia/issues?q=is%3Aopen+is%3Aissue+label%3Abug)? +1. Start by browsing [existing issues](https://github.com/atomone-hub/atomone/issues) and [discussions](https://github.com/atomone-hub/atomone/discussions). If you are looking for something interesting or if you have something on your mind, there is a chance it has already been discussed. + * Looking for a good place to start contributing? How about checking out some [good first issues](https://github.com/atomone-hub/atomone/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) or [bugs](https://github.com/atomone-hub/atomone/issues?q=is%3Aopen+is%3Aissue+label%3Abug)? 2. Determine whether a GitHub issue or discussion is more appropriate for your needs: - 1. If you want to propose something new that requires specification or an additional design, or you would like to change a process, start with a [new discussion](https://github.com/cosmos/gaia/discussions/new/choose). With discussions, we can better handle the design process using discussion threads. A discussion usually leads to one or more issues. - 2. If the issue you want addressed is a specific proposal or a bug, then open a [new issue](https://github.com/cosmos/gaia/issues/new/choose). - 3. Review existing [issues](https://github.com/cosmos/gaia/issues) to find an issue you'd like to help with. + 1. If you want to propose something new that requires specification or an additional design, or you would like to change a process, start with a [new discussion](https://github.com/atomone-hub/atomone/discussions/new/choose). With discussions, we can better handle the design process using discussion threads. A discussion usually leads to one or more issues. + 2. If the issue you want addressed is a specific proposal or a bug, then open a [new issue](https://github.com/atomone-hub/atomone/issues/new/choose). + 3.
Review existing [issues](https://github.com/atomone-hub/atomone/issues) to find an issue you'd like to help with. 3. Participate in thoughtful discussion on that issue. 4. If you would like to contribute: 1. Ensure that the proposal has been accepted. @@ -86,7 +74,7 @@ To ensure a smooth workflow for all contributors, a general procedure for contri make sure to contact them to collaborate. 3. If nobody has been assigned for the issue and you would like to work on it, make a comment on the issue to inform the community of your intentions - to begin work and please wait for an acknowledgement from the stewarding team. + to begin work. 5. To submit your work as a contribution to the repository, follow standard GitHub best practices. See [development procedure guidelines](#development-procedure) below. @@ -96,17 +84,15 @@ PRs opened before adequate design discussion has taken place in a GitHub issue h ## Project Board -We use self-organizing principles to coordinate and collaborate across organizations in structured "EPICs" that focus on specific problem domains or architectural components of Gaia. For details, see the [GitHub Project board](https://github.com/orgs/cosmos/projects/28/views/11). - -The developers work in sprints, which are available in a [GitHub Project](https://github.com/orgs/cosmos/projects/28/views/2). +We use self-organizing principles to coordinate and collaborate across organizations in structured "EPICs" that focus on specific problem domains or architectural components of AtomOne. ## Architecture Decision Records (ADR) -When proposing an architecture decision for Gaia, please start by opening an [issue](https://github.com/cosmos/gaia/issues/new/choose) or a [discussion](https://github.com/cosmos/gaia/discussions/new) with a summary of the proposal. Once the proposal has been discussed and there is rough alignment on a high-level approach to the design, you may either start development, or write an ADR. +When proposing an architecture decision for AtomOne, please start by opening an [issue](https://github.com/atomone-hub/atomone/issues/new/choose) or a [discussion](https://github.com/atomone-hub/atomone/discussions/new) with a summary of the proposal. Once the proposal has been discussed and there is rough alignment on a high-level approach to the design, you may either start development, or write an ADR. If your architecture decision is a simple change, you may contribute directly without writing an ADR. However, if you are proposing a significant change, please include a corresponding ADR. -To create an ADR, follow the [template](./docs/architecture/adr-template.md) and [doc](./docs/architecture/README.md). If you would like to see examples of how these are written, please refer to the current [ADRs](https://github.com/cosmos/gaia/tree/main/docs/architecture). +To create an ADR, follow the [template](./docs/architecture/adr-template.md) and [doc](./docs/architecture/README.md). If you would like to see examples of how these are written, please refer to the current [ADRs](https://github.com/atomone-hub/atomone/tree/main/docs/architecture). ## Development Procedure @@ -116,20 +102,20 @@ Depending on the scope of the work, we differentiate between self-contained pull **Self-contained pull requests**: -* Fork the repo (core developers must create a branch directly in the Gaia repo), +* Fork the repo (core developers must create a branch directly in the AtomOne repo), branch from the HEAD of `main`, make some commits, and submit a PR to `main`. 
-* For developers who are core contributors and are working within the `gaia` repo, follow branch name conventions to ensure clear +* For developers who are core contributors and are working within the `atomone` repo, follow branch name conventions to ensure clear ownership of branches: `{moniker}/{issue#}-branch-name`. * See [Branching Model](#branching-model-and-release) for more details. **Large contributions**: * Make sure that a feature branch is created in the repo. - This will be created by the stewarding team after design discussions. + This will be created by the codeowners after design discussions. The name convention for the feature branch must be `feat/{issue#}-branch-name`. Note that (similar to `main`) all feature branches have branch protection rules and they run the CI. Unlike `main`, feature branch may intermittently fail `make lint`, `make run-tests`, or `make build/install`. -* Fork the repo (core developers must create a branch directly in the Gaia repo), +* Fork the repo (core developers must create a branch directly in the AtomOne repo), branch from the HEAD of the feature branch, make some commits, and submit a PR to the feature branch. All PRs targeting a feature branch should follow the same guidelines in this document. * Once the feature is completed, submit a PR from the feature branch targeting `main`. @@ -140,11 +126,11 @@ will do it anyway using a pre-configured setup of the programming language mode) A convenience git `pre-commit` hook that runs the formatters automatically before each commit is available in the `contrib/githooks/` directory. -**Note:** Exceptions to the above guidelines are possible, but only after prior discussions with the stewarding team. +**Note:** Exceptions to the above guidelines are possible, but only after prior discussions with the codeowners. ### Testing -Tests can be executed by running `make run-tests` at the top level of the Gaia repository. +Tests can be executed by running `make run-tests` at the top level of the AtomOne repository. For running the e2e tests, make sure to build the docker images by running `make docker-build-all`. When testing a function under a variety of different inputs, we prefer to use @@ -180,10 +166,11 @@ Before submitting a pull request: Then: 1. If you have something to show, **start with a `Draft` PR**. It's good to have early validation of your work and we highly recommend this practice. A Draft PR also indicates to the community that the work is in progress. - Draft PRs also help the stewarding team provide early feedback and ensure the work is in the right direction. + Draft PRs also help the codeowners provide early feedback and ensure the work is in the right direction. 2. When the code is complete, change your PR from `Draft` to `Ready for Review`. 3. Go through the actions for each checkbox present in the PR template description. The PR actions are automatically provided for each new PR. - +4. Be sure to include a relevant changelog entry in the `Unreleased` section of `CHANGELOG.md` (see file for log format). + PRs must have a category prefix that is based on the type of changes being made (for example, `fix`, `feat`, `refactor`, `docs`, and so on). The [type](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) must be included in the PR title as a prefix (for example, `fix: `). @@ -231,64 +218,7 @@ items. 
In addition, use the following review explanations: ### Updating Documentation -If you open a PR in Gaia, it is mandatory to update the relevant documentation in `/docs`. - -### Changelog - -To manage and generate our changelog, we currently use [unclog](https://github.com/informalsystems/unclog). - -Every PR with types `fix`, `feat`, `deps`, and `refactor` should include a file -`.changelog/unreleased/${section}/[${component}/]${pr-number}-${short-description}.md`, -where: - -- `section` is one of - `dependencies`, `improvements`, `features`, `bug-fixes`, `state-breaking`, `api-breaking`, - and _**if multiple apply, create multiple files**_, - not necessarily with the same `short-description` or content; -- `pr-number` is the PR number; -- `short-description` is a short (4 to 6 word), hyphen separated description of the change; -- `component` is used for changes that affect one of the components defined in the [config](.changelog/config.toml), e.g., `tests`, `globalfee`. - -For examples, see the [.changelog](.changelog) folder. - -Use `unclog` to add a changelog entry in `.changelog` (check the [requirements](https://github.com/informalsystems/unclog#requirements) first): -```bash -# add a general entry -unclog add - -i "${pr-number}-${short-description}" - -p "${pr-number}" - -s "${section}" - -m "${description}" - -# add a entry to a component -unclog add - -i "${pr-number}-${short-description}" - -p "${pr-number}" - -c "${component}" - -s "${section}" - -m "${description}" -``` -where `${description}` is a detailed description of the changelog entry. - -For example, -```bash -# add an entry for bumping IBC to v4.4.2 -unclog add -i "2554-bump-ibc" -p 2554 -s dependencies -m "Bump [ibc-go](https://github.com/cosmos/ibc-go) to [v4.4.2](https://github.com/cosmos/ibc-go/releases/tag/v4.4.2)" - -# add an entry for changing the global fee module; -# note that the entry is added to both state-breaking and api-breaking sections -unclog add -i "2424-params" -p 2424 -c globalfee -s state-breaking -m "Add \`bypass-min-fee-msg-types\` and \`maxTotalBypassMinFeeMsgGagUsage\` to globalfee params" -unclog add -i "2424-params" -p 2424 -c globalfee -s api-breaking -m "Add \`bypass-min-fee-msg-types\` and \`maxTotalBypassMinFeeMsgGagUsage\` to globalfee params" -``` - -**Note:** `unclog add` requires an editor. This can be set either by configuring -an `$EDITOR` environment variable or by manually specify an editor binary path -via the `--editor` flag. - -**Note:** Changelog entries should answer the question: "what is important about this -change for users to know?" or "what problem does this solve for users?". It -should not simply be a reiteration of the title of the associated PR, unless the -title of the PR _very_ clearly explains the benefit of a change to a user. +If you open a PR in AtomOne, it is mandatory to update the relevant documentation in `/docs`. ## Dependencies @@ -299,7 +229,7 @@ The main branch of every Cosmos repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software. -When dependencies in Gaia's `go.mod` are changed, it is generally accepted practice +When dependencies in AtomOne's `go.mod` are changed, it is generally accepted practice to delete `go.sum` and then run `go mod tidy`. Since some dependencies are not under our control, a third party may break our @@ -307,7 +237,7 @@ build, in which case we can fall back on `go mod tidy -v`. 
## Protobuf

-We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/cosmos/gogoproto) to generate code for use in Gaia.
+We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/cosmos/gogoproto) to generate code for use in AtomOne.

For deterministic behavior around Protobuf tooling, everything is containerized using Docker. Make sure to have Docker installed on your machine, or head to [Docker's website](https://docs.docker.com/get-docker/) to install it.

@@ -317,7 +247,7 @@ To generate the protobuf stubs, you can run `make proto-gen`.

User-facing repos should adhere to the trunk based development branching model: https://trunkbaseddevelopment.com. User branches should start with a user name, example: `{moniker}/{issue#}-branch-name`.

-Gaia follows [semantic versioning](https://semver.org), but with the some deviations to account for state-machine and API breaking changes. See [RELEASE_PROCESS.md](./RELEASE_PROCESS.md) for details.
+AtomOne follows [semantic versioning](https://semver.org), but with some deviations to account for state-machine and API breaking changes. See [RELEASE_PROCESS.md](./RELEASE_PROCESS.md) for details.

### PR Targeting

diff --git a/Dockerfile b/Dockerfile
index b27042c0..0289700f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,7 @@
 ARG IMG_TAG=latest

-# Compile the gaiad binary
-FROM golang:1.21-alpine AS gaiad-builder
+# Compile the atomoned binary
+FROM golang:1.21-alpine AS atomoned-builder
 WORKDIR /src/app/
 COPY go.mod go.sum* ./
 RUN go mod download
@@ -13,8 +13,8 @@ RUN CGO_ENABLED=0 make install
 # Add to a distroless container
 FROM cgr.dev/chainguard/static:$IMG_TAG
 ARG IMG_TAG
-COPY --from=gaiad-builder /go/bin/gaiad /usr/local/bin/
+COPY --from=atomoned-builder /go/bin/atomoned /usr/local/bin/
 EXPOSE 26656 26657 1317 9090
 USER 0

-ENTRYPOINT ["gaiad", "start"]
\ No newline at end of file
+ENTRYPOINT ["atomoned", "start"]
diff --git a/LICENSE b/LICENSE
index 130a1fa3..6a4ec66a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright 2016 the Gaia authors
+   Copyright 2024 the AtomOne authors

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/Makefile b/Makefile
index e0ae2219..d99db5e0 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,7 @@ SDK_PACK := $(shell go list -m github.com/cosmos/cosmos-sdk | sed 's/ /\@/g')
 TM_VERSION := $(shell go list -m github.com/cometbft/cometbft | sed 's:.* ::') # grab everything after the space in "github.com/cometbft/cometbft v0.34.7"
 DOCKER := $(shell which docker)
 BUILDDIR ?= $(CURDIR)/build
-TEST_DOCKER_REPO=cosmos/contrib-gaiatest
+TEST_DOCKER_REPO=cosmos/contrib-atomonetest

 GO_SYSTEM_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.
-f1-2) REQUIRE_GO_VERSION = 1.21 @@ -51,7 +51,7 @@ ifeq ($(LEDGER_ENABLED),true) endif endif -ifeq (cleveldb,$(findstring cleveldb,$(GAIA_BUILD_OPTIONS))) +ifeq (cleveldb,$(findstring cleveldb,$(ATOMONE_BUILD_OPTIONS))) build_tags += gcc cleveldb endif build_tags += $(BUILD_TAGS) @@ -64,17 +64,17 @@ build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags)) # process linker flags -ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=gaia \ - -X github.com/cosmos/cosmos-sdk/version.AppName=gaiad \ +ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=atomone \ + -X github.com/cosmos/cosmos-sdk/version.AppName=atomoned \ -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \ -X github.com/cometbft/cometbft/version.TMCoreSemVer=$(TM_VERSION) -ifeq (cleveldb,$(findstring cleveldb,$(GAIA_BUILD_OPTIONS))) +ifeq (cleveldb,$(findstring cleveldb,$(ATOMONE_BUILD_OPTIONS))) ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=cleveldb endif -ifeq (,$(findstring nostrip,$(GAIA_BUILD_OPTIONS))) +ifeq (,$(findstring nostrip,$(ATOMONE_BUILD_OPTIONS))) ldflags += -w -s endif ldflags += $(LDFLAGS) @@ -82,7 +82,7 @@ ldflags := $(strip $(ldflags)) BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)' # check for nostrip option -ifeq (,$(findstring nostrip,$(GAIA_BUILD_OPTIONS))) +ifeq (,$(findstring nostrip,$(ATOMONE_BUILD_OPTIONS))) BUILD_FLAGS += -trimpath endif @@ -97,7 +97,7 @@ include contrib/devtools/Makefile check_version: ifneq ($(GO_SYSTEM_VERSION), $(REQUIRE_GO_VERSION)) - @echo "ERROR: Go version 1.21 is required for $(VERSION) of Gaia." + @echo "ERROR: Go version 1.21 is required for $(VERSION) of AtomOne." endif all: install lint run-tests test-e2e vulncheck @@ -130,7 +130,7 @@ go.sum: go.mod draw-deps: @# requires brew install graphviz or apt-get install graphviz go install github.com/RobotsAndPencils/goviz - @goviz -i ./cmd/gaiad -d 2 | dot -Tpng -o dependency-graph.png + @goviz -i ./cmd/atomoned -d 2 | dot -Tpng -o dependency-graph.png clean: rm -rf $(BUILDDIR)/ artifacts/ @@ -215,7 +215,7 @@ endif .PHONY: run-tests $(TEST_TARGETS) docker-build-debug: - @docker build -t cosmos/gaiad-e2e -f e2e.Dockerfile . + @docker build -t cosmos/atomoned-e2e -f e2e.Dockerfile . # TODO: Push this to the Cosmos Dockerhub so we don't have to keep building it # in CI. 
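The renamed `ATOMONE_BUILD_OPTIONS` variable is consumed exactly as the former `GAIA_BUILD_OPTIONS` was in the hunks above; a sketch of how these options would be passed:

```bash
# Build with the C LevelDB backend: adds the gcc and cleveldb build tags
# and sets types.DBBackend=cleveldb via ldflags
ATOMONE_BUILD_OPTIONS=cleveldb make install

# Keep debug symbols: nostrip skips the -w -s ldflags and -trimpath
ATOMONE_BUILD_OPTIONS=nostrip make build
```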
@@ -252,16 +252,16 @@ format: ############################################################################### start-localnet-ci: build - rm -rf ~/.gaiad-liveness - ./build/gaiad init liveness --chain-id liveness --home ~/.gaiad-liveness - ./build/gaiad config chain-id liveness --home ~/.gaiad-liveness - ./build/gaiad config keyring-backend test --home ~/.gaiad-liveness - ./build/gaiad keys add val --home ~/.gaiad-liveness - ./build/gaiad genesis add-genesis-account val 10000000000000000000000000stake --home ~/.gaiad-liveness --keyring-backend test - ./build/gaiad genesis gentx val 1000000000stake --home ~/.gaiad-liveness --chain-id liveness - ./build/gaiad genesis collect-gentxs --home ~/.gaiad-liveness - sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' ~/.gaiad-liveness/config/app.toml - ./build/gaiad start --home ~/.gaiad-liveness --x-crisis-skip-assert-invariants + rm -rf ~/.atomoned-liveness + ./build/atomoned init liveness --chain-id liveness --home ~/.atomoned-liveness + ./build/atomoned config chain-id liveness --home ~/.atomoned-liveness + ./build/atomoned config keyring-backend test --home ~/.atomoned-liveness + ./build/atomoned keys add val --home ~/.atomoned-liveness + ./build/atomoned genesis add-genesis-account val 10000000000000000000000000stake --home ~/.atomoned-liveness --keyring-backend test + ./build/atomoned genesis gentx val 1000000000stake --home ~/.atomoned-liveness --chain-id liveness + ./build/atomoned genesis collect-gentxs --home ~/.atomoned-liveness + sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' ~/.atomoned-liveness/config/app.toml + ./build/atomoned start --home ~/.atomoned-liveness --x-crisis-skip-assert-invariants .PHONY: start-localnet-ci diff --git a/README.md b/README.md index b9cdc47a..b094f0c6 100644 --- a/README.md +++ b/README.md @@ -1,127 +1,25 @@ -# Cosmos Hub (Gaia) - -![Banner!](https://miro.medium.com/max/2000/1*DHtmSfS_Efvuq8n2LAnhkA.png) - -[![Project Status: Active -- The project has reached a stable, usable state and is being actively -developed.](https://img.shields.io/badge/repo%20status-Active-green.svg)](https://www.repostatus.org/#active) -[![License: Apache-2.0](https://img.shields.io/github/license/cosmos/gaia.svg)](https://github.com/cosmos/gaia/blob/main/LICENSE) -[![Version](https://img.shields.io/github/v/release/cosmos/gaia.svg)](https://github.com/cosmos/gaia/releases/latest) -[![Go Report Card](https://goreportcard.com/badge/github.com/cosmos/gaia)](https://goreportcard.com/report/github.com/cosmos/gaia) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue?logo=go)](https://pkg.go.dev/github.com/cosmos/gaia) -[![Lines of Code](https://sonarcloud.io/api/project_badges/measure?project=cosmos_gaia&metric=ncloc)](https://sonarcloud.io/summary/new_code?id=cosmos_gaia) -[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=cosmos_gaia&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=cosmos_gaia) -[![Coverage](https://sonarcloud.io/api/project_badges/measure?project=cosmos_gaia&metric=coverage)](https://sonarcloud.io/summary/new_code?id=cosmos_gaia) -[![Discord](https://badgen.net/badge/icon/discord?icon=discord&label)](https://discord.gg/cosmosnetwork) -[![Twitter](https://badgen.net/badge/icon/twitter?icon=twitter&label)](https://twitter.com/cosmoshub) - -The Cosmos Hub is the first of an exploding number of interconnected blockchains that comprise the Cosmos Network. - -
- -## 🤔 — Why should you be interested in the Cosmos Hub - -___ - -The Cosmos Hub is built using the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) and compiled to a binary called `gaiad` (Gaia Daemon). The Cosmos Hub and other fully sovereign Cosmos SDK blockchains interact with one another using a protocol called [IBC](https://github.com/cosmos/ibc) that enables Inter-Blockchain Communication. In order to understand what the Cosmos Hub is you can read this [introductory explanation](https://hub.cosmos.network/main/hub-overview/overview.html). - -
- -## ⚡ — Documentation & Introduction - -___ - -Cosmos Hub is a blockchain network that operates on Proof-of-Stake consensus. You can find an introduction to the Cosmos Hub and how to use the `gaiad` binary as a delegator, validator or node operator as well as how governance on the Cosmos Hub works in the [documentation](https://hub.cosmos.network/main/hub-overview/overview.html). - -Alternatively, whether you're new to blockchain technology or interested in getting involved, the Cosmos Network [Course](https://tutorials.cosmos.network/academy/0-welcome/) will guide you through everything. The course walks you through the basics of blockchain technology, to staking, setting up your own node, and beyond. - -
- -## 👤 — Node Operators - -___ -If you're interested in running a node on the current Cosmos Hub, check out the docs to [Join the Cosmos Hub Mainnet](https://github.com/cosmos/gaia/blob/main/docs/hub-tutorials/join-mainnet.md). - -
- -## 🗣️ — Validators - -___ - -If you want to participate and help secure Cosmos Hub, check out becoming a validator. Information on what a validator is and how to participate as one can be found at the [Validator FAQ](https://hub.cosmos.network/main/validators/validator-faq.html#). If you're running a validator node on the Cosmos Hub, reach out to a Janitor on the [Cosmos Developers Discord](https://discord.gg/cosmosnetwork) to join the `#cosmos-hub-validators-verified` channel. - -
- -## 👥 — Delegators - -___ - -If you still want to participate on the Cosmos Hub, check out becoming a delegator. Information on what a delegator is and how to participate as one can be found at the [Delegator FAQ](https://hub.cosmos.network/main/delegators/delegator-faq.html). - -
- -## 👥 — Testnet - -___ - -To participate in or utilize the current Cosmos Hub testnet, take a look at the [cosmos/testnets](https://github.com/cosmos/testnets) repository. This testnet is for the Theta Upgrade expected in Q1 2022. For future upgrades of the Cosmos Hub take a look at the [roadmap](https://github.com/cosmos/gaia/blob/main/docs/roadmap/cosmos-hub-roadmap-2.0.md). - -
- -## 🌐 — Roadmap - -___ - -For an overview of upcoming changes to the Cosmos Hub take a look at the [Roadmap](https://github.com/cosmos/gaia/blob/main/docs/roadmap/cosmos-hub-roadmap-2.0.md). - -
- -## 🗄️ — Archives & Genesis - -___ - -With each version of the Cosmos Hub, the chain is restarted from a new Genesis state. -Mainnet is currently running as `cosmoshub-4`. Archives of the state of `cosmoshub-1`, `cosmoshub-2`, and `cosmoshub-3` are available [here](./docs/resources/archives.md). - -If you are looking for historical genesis files and other data [`cosmos/mainnet`](http://github.com/cosmos/mainnet) is an excellent resource. Snapshots are also available at [cosmos.quicksync.io](https://cosmos.quicksync.io). - -
- -## 🤝 — How to contribute - -___ - -Check out [contributing.md](CONTRIBUTING.md) for our guidelines & policies for how we develop the Cosmos Hub. Thank you to all those who have contributed! - -
- -## 💬 — Talk to us - -___ - -We have active, helpful communities on Twitter, Discord, and Telegram. - -| | | -| -- | -- | -| Cosmos Developers Discord | Discord | -| Cosmos Twitter | Tweet | -| Cosmos Gov Twitter | Tweet | -| Cosmos Telegram | Telegram | - -For updates on the Cosmos Hub team's activities follow us on the [Cosmos Hub Twitter](https://twitter.com/cosmoshub) account. - -
- -## 👏 — Supporters - -___ - -[![Stargazers repo roster for @cosmos/gaia](https://reporoster.com/stars/cosmos/gaia)](https://github.com/cosmos/gaia/stargazers) -[![Forkers repo roster for @cosmos/gaia](https://reporoster.com/forks/cosmos/gaia)](https://github.com/cosmos/gaia/network/members) - -
+# AtomOne
+
+AtomOne is built using the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) as a fork of the
+[Cosmos Hub](https://github.com/cosmos/gaia) at version [v15.2.0](https://github.com/cosmos/gaia/releases/tag/v15.2.0) (common commit hash 7281c9b).
+
+The following modifications have been made to the Cosmos Hub software to create AtomOne:
+
+TODO: review items
+1. Removed the x/globalfee module and reverted to an older and simpler fee decorator
+2. Removed IBC and related modules (e.g. ICA, Packet Forwarding Middleware, etc.)
+3. Removed Interchain Security module
+4. Reverted to standard Cosmos SDK v0.46.16 without the Liquid Staking Module (LSM)
+5. Changed Bech32 prefixes to `atone` (see `cmd/atomoned/cmd/config.go`)
+6. Reduced hard-coded ante min-deposit percentage to 1% (see `ante/gov_ante.go:minInitialDepositFraction`)
+7. Removed the ability for validators to vote on proposals with delegations; they can only use their own stake
+8. Removed community spend proposal
+9. Allowed setting different voting periods for different proposal types
+
+## Reproducible builds
+
+An effort has been made to make it possible to build locally the exact same
+binary as the one in the GitHub Releases section. To do this, check out the
+expected version and then simply run `make build` (which will output the binary
+to the `build` directory) or `make install`. The resulting binary should have
+the same sha256 hash as the one from the GitHub Releases section.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
deleted file mode 100644
index fcd3c300..00000000
--- a/RELEASE_NOTES.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Gaia v15.2.0 Release Notes
-
-***This is a special point release in the v15 release series.***
-
-## 🕐 Timeline
-
-**This is a mandatory upgrade for all validators and full node operators.**
-The upgrade height is [19939000](https://www.mintscan.io/cosmos/block/19939000), which is approx. April 10th 2024, 15:00 CET.
-
-## 📝 Changelog
-
-Check out the [changelog](https://github.com/cosmos/gaia/blob/v15.2.0/CHANGELOG.md) for a list of relevant changes or [compare all changes](https://github.com/cosmos/gaia/compare/v15.1.0...v15.2.0) from last release.
-
-Refer to the [upgrading guide](https://github.com/cosmos/gaia/blob/release/v15.2.x/UPGRADING.md) when migrating from `v15.1.x` to `v15.2.x`.
-
-## 🚀 Highlights
-
-This release fixes two issues identified after the v15 upgrade:
-
-- Increases x/gov metadata fields length to 10200.
-- Fixes parsing of historic Txs with TxExtensionOptions.
-
-As both fixes are state breaking, a coordinated upgrade is necessary.
-
-## 🔨 Build from source
-
-You must use Golang `v1.21` if building from source.
-
-```bash
-git clone https://github.com/cosmos/gaia
-cd gaia && git checkout v15.2.0
-make install
-```
-
-## ⚡️ Download binaries
-
-Binaries for linux, darwin, and windows are available below.
\ No newline at end of file
diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md
index 760cb7fd..c58452ed 100644
--- a/RELEASE_PROCESS.md
+++ b/RELEASE_PROCESS.md
@@ -3,22 +3,15 @@

 - [Release Process](#release-process)
   - [Breaking Changes](#breaking-changes)
   - [Major Release Procedure](#major-release-procedure)
-    - [Changelog](#changelog)
-    - [Creating a new release branch](#creating-a-new-release-branch)
-    - [Cutting a new release](#cutting-a-new-release)
-    - [Update the changelog on main](#update-the-changelog-on-main)
   - [Release Notes](#release-notes)
   - [Tagging Procedure](#tagging-procedure)
   - [Test building artifacts](#test-building-artifacts)
   - [Installing goreleaser](#installing-goreleaser)
   - [Non-major Release Procedure](#non-major-release-procedure)
-  - [Major Release Maintenance](#major-release-maintenance)
-  - [Stable Release Policy](#stable-release-policy)

+This document outlines the release process for AtomOne.

-This document outlines the release process for Cosmos Hub (Gaia).
-
-Gaia follows [semantic versioning](https://semver.org), but with the following deviations to account for state-machine and API breaking changes:
+AtomOne follows [semantic versioning](https://semver.org), but with the following deviations to account for state-machine and API breaking changes:

 - State-machine breaking changes will result in an increase of the major version X (X.y.z).
 - Emergency releases & API breaking changes will result in an increase of the minor version Y (x.Y.z | x > 0).

@@ -37,7 +30,7 @@ A change is considered to be ***API breaking*** if it modifies the provided API.

 ## Major Release Procedure

-A _major release_ is an increment of the first number (eg: `v9.1.0` → `v10.0.0`). Each major release opens a _stable release series_ and receives updates outlined in the [Major Release Maintenance](#major-release-maintenance) section.
+A _major release_ is an increment of the first number (eg: `v9.1.0` → `v10.0.0`).

 **Note**: Generally, PRs should target either `main` or a long-lived feature branch (see [CONTRIBUTING.md](./CONTRIBUTING.md#pull-requests)).
 An exception is PRs opened via the Github mergify integration (i.e., backported PRs).

@@ -50,7 +43,7 @@ An exception are PRs open via the Github mergify integration (i.e., backported P
   * Create a new version section in the `CHANGELOG.md` (see the file itself for the log format)
   * Create release notes, in `RELEASE_NOTES.md`, highlighting the new features and changes in the version. This is needed so the bot knows which entries to add to the release page on GitHub.
-  * (To be added in the future) ~~Additionally verify that the `UPGRADING.md` file is up to date and contains all the necessary information for upgrading to the new version.~~
+  * Additionally verify that the `UPGRADING.md` file is up to date and contains all the necessary information for upgrading to the new version.
 * We freeze the release branch from receiving any new features and focus on releasing a release candidate.
   * Finish audits and reviews.
   * Add more tests.
@@ -65,80 +58,6 @@ An exception are PRs open via the Github mergify integration (i.e., backported P
 * Create a new annotated git tag in the release branch (follow the [Tagging Procedure](#tagging-procedure)). This will trigger the automated release process (which will also create the release artifacts).
 * Once the release process completes, modify release notes if needed.
-### Changelog - -For PRs that are changing production code, please add a changelog entry in `.changelog` (for details, see [contributing guidelines](./CONTRIBUTING.md#changelog)). - -To manage and generate the changelog on Gaia, we currently use [unclog](https://github.com/informalsystems/unclog). - -#### Creating a new release branch - -Unreleased changes are collected on `main` in `.changelog/unreleased/`. -However, `.changelog/` on `main` contains also existing releases (e.g., `v10.0.0`). -Thus, when creating a new release branch (e.g., `release/v11.x`), the following steps are necessary: - -- create a new release branch, e.g., `release/v11.x` - ```bash - git checkout main - git pull - git checkout -b release/v11.x - ``` -- delete all the sub-folders in `.changelog/` except `unreleased/` - ```bash - find ./.changelog -mindepth 1 -maxdepth 1 -type d -not -name unreleased | xargs rm -r - ``` -- replace the content of `.changelog/epilogue.md` with the following text - ```md - ## Previous Versions - - [CHANGELOG of previous versions](https://github.com/cosmos/gaia/blob/main/CHANGELOG.md) - ``` -- push the release branch upstream - ```bash - git push - ``` - -#### Cutting a new release - -Before cutting a _**release candidate**_ (e.g., `v11.0.0-rc0`), the following steps are necessary: - -- move to the release branch, e.g., `release/v11.x` - ```bash - git checkout release/v11.x - ``` -- move all entries in ".changelog/unreleased" to the release version, e.g., `v11.0.0`, i.e., - ```bash - unclog release v11.0.0 - ``` -- update `CHANGELOG.md`, i.e., - ```bash - unclog build > CHANGELOG.md - ``` -- open a PR (from this new created branch) against the release branch, e.g., `release/v11.x` - -Now you can cut the release candidate, e.g., v11.0.0-rc0 (follow the [Tagging Procedure](#tagging-procedure)). - -#### Update the changelog on main - -Once the **final release** is cut, the new changelog section must be added to main: - -- checkout a new branch from the `main` branch, i.e., - ```bash - git checkout main - git pull - git checkout -b /backport_changelog - ``` -- bring the new changelog section from the release branch into this branch, e.g., - ```bash - git checkout release/v11.x .changelog/v11.0.0 - ``` -- remove duplicate entries that are both in `.changelog/unreleased/` and the new changelog section, e.g., `.changelog/v11.0.0` -- update `CHANGELOG.md`, i.e., - ```bash - unclog build > CHANGELOG.md - ``` -- open a PR (from this new created branch) against `main` - ### Release Notes Release notes will be created using the `RELEASE_NOTES.md` from the release branch. @@ -147,19 +66,19 @@ Once the automated releases process is completed, please add any missing informa With every release, the `goreleaser` tool will create a file with all the build artifact checksums and upload it alongside the artifacts. 
The file is called `SHA256SUMS-{{.version}}.txt` and contains the following:
```
-098b00ed78ca01456c388d7f1f22d09a93927d7a234429681071b45d94730a05 gaiad_0.0.4_windows_arm64.exe
-15b2b9146d99426a64c19d219234cd0fa725589c7dc84e9d4dc4d531ccc58bec gaiad_0.0.4_darwin_amd64
-604912ee7800055b0a1ac36ed31021d2161d7404cea8db8776287eb512cd67a9 gaiad_0.0.4_darwin_arm64
-76e5ff7751d66807ee85bc5301484d0f0bcc5c90582d4ba1692acefc189392be gaiad_0.0.4_linux_arm64
-bcbca82da2cb2387ad6d24c1f6401b229a9b4752156573327250d37e5cc9bb1c gaiad_0.0.4_windows_amd64.exe
-f39552cbfcfb2b06f1bd66fd324af54ac9ee06625cfa652b71eba1869efe8670 gaiad_0.0.4_linux_amd64
+098b00ed78ca01456c388d7f1f22d09a93927d7a234429681071b45d94730a05 atomoned_0.0.4_windows_arm64.exe
+15b2b9146d99426a64c19d219234cd0fa725589c7dc84e9d4dc4d531ccc58bec atomoned_0.0.4_darwin_amd64
+604912ee7800055b0a1ac36ed31021d2161d7404cea8db8776287eb512cd67a9 atomoned_0.0.4_darwin_arm64
+76e5ff7751d66807ee85bc5301484d0f0bcc5c90582d4ba1692acefc189392be atomoned_0.0.4_linux_arm64
+bcbca82da2cb2387ad6d24c1f6401b229a9b4752156573327250d37e5cc9bb1c atomoned_0.0.4_windows_amd64.exe
+f39552cbfcfb2b06f1bd66fd324af54ac9ee06625cfa652b71eba1869efe8670 atomoned_0.0.4_linux_amd64
```

### Tagging Procedure

**Important**: _**Always create tags from your local machine**_ since all release tags should be signed and annotated.
-Using Github UI will create a `lightweight` tag, so it's possible that `gaiad version` returns a commit hash, instead of a tag.
-This is important because most operators build from source, and having incorrect information when you run `make install && gaiad version` raises confusion.
+Using Github UI will create a `lightweight` tag, so it's possible that `atomoned version` returns a commit hash, instead of a tag.
+This is important because most operators build from source, and having incorrect information when you run `make install && atomoned version` raises confusion.

The following steps are the default for tagging a specific branch commit using git on your local machine. Usually, release branches are labeled `release/v*`:

@@ -211,41 +130,11 @@ Updates to the release branch should come from `main` by backporting PRs
(usually done by automatic cherry pick followed by a PR to the release branch).
The backports must be marked using `backport/Y` label in PR for main.
It is the PR author's responsibility to fix merge conflicts, update changelog entries, and
-ensure CI passes. If a PR originates from an external contributor, a member of the stewarding team assumes
+ensure CI passes. If a PR originates from an external contributor, one of the codeowners assumes
responsibility to perform this process instead of the original author.
-
-Lastly, it is the stewarding team's responsibility to ensure that the PR meets all the Stable Release Update (SRU) criteria.
-
-Non-major Release must follow the [Stable Release Policy](#stable-release-policy).

After the release branch has all commits required for the next patch release:

-* Update the [changelog](#changelog) and the [release notes](#release-notes).
+* Update the `CHANGELOG.md` and the [release notes](#release-notes).
* Create a new annotated git tag in the release branch (follow the [Tagging Procedure](#tagging-procedure)). This will trigger the automated release process (which will also create the release artifacts).
* Once the release process completes, modify release notes if needed.
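The concrete steps for the [Tagging Procedure](#tagging-procedure) above are unchanged context outside these hunks; a signed, annotated tag is typically created along these lines (a sketch; the branch and version number are hypothetical):

```bash
# From the tip of the release branch
git checkout release/v1.x
git pull

# Create a signed, annotated tag (requires a configured signing key)
git tag -s v1.0.0 -m "Release v1.0.0"

# Push the tag to trigger the automated release process
git push origin v1.0.0
```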
-
-## Major Release Maintenance
-
-Major Release series continue to receive bug fixes (released as either a Minor or a Patch Release) until they reach **End Of Life**.
-Major Release series is maintained in compliance with the **Stable Release Policy** as described in this document.
-
-**Note**: Not every Major Release is denoted as stable releases.
-
-After two major releases, a supported major release will be transitioned to unsupported and will be deemed EOL with no further updates.
-For example, `release/v10.x` is deemed EOL once the network upgrades to `release/v12.x`.
-
-## Stable Release Policy
-
-Once a Gaia release has been completed and published, updates for it are released under certain circumstances
-and must follow the [Non-major Release Procedure](#non-major-release-procedure).
-
-The intention of the Stable Release Policy is to ensure that all major release series that are not EOL,
-are maintained with the following categories of fixes:
-
-- Tooling improvements (including code formatting, linting, static analysis and updates to testing frameworks)
-- Performance enhancements for running archival and synching nodes
-- Test and benchmarking suites, ensuring that fixes are sound and there are no performance regressions
-- Library updates including point releases for core libraries such as IBC-Go, Cosmos SDK, Tendermint and other dependencies
-- General maintenance improvements, that are deemed necessary by the stewarding team, that help align different releases and reduce the workload on the stewarding team
-- Security fixes
-
-Issues that are likely excluded, are any issues that impact operating a block producing network.
diff --git a/SECURITY.md b/SECURITY.md
index 1a7a1d51..bf2d7a02 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,30 +1,21 @@
-## How to Report a Security Bug
+# Security Policy

-If you believe you have found a security vulnerability in Gaia,
-you can report it to our primary vulnerability disclosure channel, the
-[Cosmos HackerOne Bug Bounty program](https://hackerone.com/cosmos?type=team).
+All in Bits strives to contribute toward the security of our ecosystem through
+internal security practices, and by working with external security researchers
+from the community.

-If you prefer to report an issue via email, you may send a bug report to
-security@interchain.io with the issue details, reproduction, impact, and other
-information. Please submit only one unique email thread per vulnerability.
-Any issues reported via email are ineligible for bounty rewards.
+## Reporting a Vulnerability

-Artifacts from an email report are saved at the time the email is triaged.
-Please note: our team is not able to monitor dynamic content (e.g. a Google
-Docs link that is edited after receipt) throughout the lifecycle of a report.
-If you would like to share additional information or modify previous
-information, please include it in an additional reply as an additional attachment.
+If you've identified a vulnerability, please report it through one of the
+following venues:
+* Submit an advisory through GitHub: https://github.com/atomone-hub/atomone/security/advisories/new
+* Email security [at-symbol] tendermint [dot] com. If you are concerned about
+  confidentiality, e.g. because of a high-severity issue, you may email us for
+  PGP or Signal contact details.
+* We provide bug bounty rewards through our program at
+  [HackenProof](https://hackenproof.com/all-in-bits). You must report via
+  HackenProof in order to be eligible for rewards.
-***Please DO NOT file a public issue in this repository to report a security vulnerability.*** +We will respond within 3 business days to all received reports. - -## Coordinated Vulnerability Disclosure Policy and Safe Harbor - -For the most up-to-date version of the policies that govern vulnerability -disclosure, please consult the [HackerOne program page](https://hackerone.com/cosmos?type=team&view_policy=true). - -The policy hosted on HackerOne is the official Coordinated Vulnerability -Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and -infrastructure it supports, and it supersedes previous security policies that -have been used in the past by individual teams and projects with targets in -scope of the program. +Thank you for helping to keep our ecosystem safe! diff --git a/UPGRADING.md b/UPGRADING.md index 8b256abe..7e3afd7c 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -1,160 +1,3 @@ -# Upgrade Gaia from v15.1.0 to v15.2.0 +# Upgrade AtomOne -## This is a coordinated upgrade. IT IS CONSENSUS BREAKING, so please apply the fix only on height 19939000. - -### Release Details -* https://github.com/cosmos/gaia/releases/tag/v15.2.0 -* Chain upgrade height : `19939000`. Exact upgrade time can be checked [here](https://www.mintscan.io/cosmos/block/19939000). -* Go version has been frozen at `1.21`. If you are going to build `gaiad` binary from source, make sure you are using the right GO version! - -# Performing the co-ordinated upgrade - -This co-ordinated upgrades requires validators to stop their validators at `halt-height`, switch their binary to `v15.2.0` and restart their nodes with the new version. - -The exact sequence of steps depends on your configuration. Please take care to modify your configuration appropriately if your setup is not included in the instructions. - -# Manual steps - -## Step 1: Configure `halt-height` using v15.1.0 and restart the node. - -This upgrade requires `gaiad` halting execution at a pre-selected `halt-height`. Failing to stop at `halt-height` may cause a consensus failure during chain execution at a later time. - -There are two mutually exclusive options for this stage: - -### Option 1: Set the halt height by modifying `app.toml` - -* Stop the gaiad process. - -* Edit the application configuration file at `~/.gaia/config/app.toml` so that `halt-height` reflects the upgrade plan: - -```toml -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 19939000 -``` -* restart gaiad process - -* Wait for the upgrade height and confirm that the node has halted - -### Option 2: Restart the `gaiad` binary with command line flags - -* Stop the gaiad process. - -* Do not modify `app.toml`. Restart the `gaiad` process with the flag `--halt-height`: -```shell -gaiad start --halt-height 19939000 -``` - -* Wait for the upgrade height and confirm that the node has halted - -Upon reaching the `halt-height` you need to replace the `v15.1.0` gaiad binary with the new `gaiad v15.2.0` binary and remove the `halt-height` constraint. -Depending on your setup, you may need to set `halt-height = 0` in your `app.toml` before resuming operations. 
-```shell - git clone https://github.com/cosmos/gaia.git -``` - -## Step 2: Build and start the v15.2.0 binary - -### Remember to revert `gaiad` configurations -* Reset `halt-height = 0` option in the `app.toml` or -* Remove it from start parameters of the gaiad binary before restarting the node - -We recommend you perform a backup of your data directory before switching to `v15.2.0`. - -```shell -cd $HOME/gaia -git pull -git fetch --tags -git checkout v15.2.0 -make install - -# verify install -gaiad version -# v15.2.0 -``` - -```shell -gaiad start # starts the v15.2.0 node -``` - -# Cosmovisor steps - -## Prerequisite: Alter systemd service configuration - -Disable automatic restart of the node service. To do so please alter your `gaiad.service` file configuration and set appropriate lines to following values. - -``` -Restart=no - -Environment="DAEMON_ALLOW_DOWNLOAD_BINARIES=false" -Environment="DAEMON_RESTART_AFTER_UPGRADE=false" -``` - -After that you will need to run `sudo systemctl daemon-reload` to apply changes in the service configuration. - -There is no need to restart the node yet; these changes will get applied during the node restart in the next step. - -## Setup Cosmovisor -### Create the updated gaiad binary of v15.2.0 - -### Remember to revert `gaiad` configurations -* Reset `halt-height = 0` option in the `app.toml` or -* Remove it from start parameters of the gaiad binary before starting the node - -#### Go to gaiad directory if present else clone the repository - -```shell - git clone https://github.com/cosmos/gaia.git -``` - -#### Follow these steps if gaiad repo already present - -```shell - cd $HOME/.gaia - git pull - git fetch --tags - git checkout v15.2.0 - make install -``` - -#### Check the new gaiad version, verify the latest commit hash -```shell - $ gaiad version --long - name: gaiad - server_name: gaiad - version: 15.2.0 - commit: - ... -``` - -#### Or check checksum of the binary if you decided to download it - -Checksums can be found on the official release page: -* https://github.com/cosmos/gaia/releases/tag/v15.2.0 - -The checksums file is located in the `Assets` section: -* e.g. [SHA256SUMS-v15.2.0.txt](https://github.com/cosmos/gaia/releases/download/v15.2.0/SHA256SUMS-v15.2.0.txt) - -```shell -$ shasum -a 256 gaiad-v15.2.0-linux-amd64 - gaiad-v15.2.0-linux-amd64 -``` - -### Copy the new gaiad (v15.2.0) binary to cosmovisor current directory -```shell - cp $GOPATH/bin/gaiad ~/.gaiad/cosmovisor/current/bin -``` - -### Restore service file settings - -If you are using a service file, restore the previous `Restart` settings in your service file: -``` -Restart=On-failure -``` -Reload the service control `sudo systemctl daemon-reload`. - -# Revert `gaiad` configurations - -Depending on which path you chose for Step 1, either: - -* Reset `halt-height = 0` option in the `app.toml` or -* Remove it from start parameters of the gaiad binary and start node again \ No newline at end of file +This guide provides instructions for upgrading to specific versions of AtomOne. 
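The reproducible-builds note in the new README pairs naturally with the `SHA256SUMS-{{.version}}.txt` asset described in `RELEASE_PROCESS.md`; verifying a locally built binary against a release could look like this (a sketch; the tag and asset names are hypothetical):

```bash
# Check out the release tag and build locally
git checkout v1.0.0
make build

# Hash the local binary and compare with the published checksums file
shasum -a 256 build/atomoned
grep linux_amd64 SHA256SUMS-v1.0.0.txt
```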
diff --git a/ante/ante.go b/ante/ante.go index b82f0604..cc445578 100644 --- a/ante/ante.go +++ b/ante/ante.go @@ -12,8 +12,8 @@ import ( paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" - gaiaerrors "github.com/cosmos/gaia/v15/types/errors" - gaiafeeante "github.com/cosmos/gaia/v15/x/globalfee/ante" + atomoneerrors "github.com/atomone-hub/atomone/types/errors" + atomonefeeante "github.com/atomone-hub/atomone/x/globalfee/ante" ) // HandlerOptions extend the SDK's AnteHandler options by requiring the IBC @@ -29,24 +29,24 @@ type HandlerOptions struct { func NewAnteHandler(opts HandlerOptions) (sdk.AnteHandler, error) { if opts.AccountKeeper == nil { - return nil, errorsmod.Wrap(gaiaerrors.ErrLogic, "account keeper is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrLogic, "account keeper is required for AnteHandler") } if opts.BankKeeper == nil { - return nil, errorsmod.Wrap(gaiaerrors.ErrLogic, "bank keeper is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrLogic, "bank keeper is required for AnteHandler") } if opts.SignModeHandler == nil { - return nil, errorsmod.Wrap(gaiaerrors.ErrLogic, "sign mode handler is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrLogic, "sign mode handler is required for AnteHandler") } if opts.IBCkeeper == nil { - return nil, errorsmod.Wrap(gaiaerrors.ErrLogic, "IBC keeper is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrLogic, "IBC keeper is required for AnteHandler") } if opts.GlobalFeeSubspace.Name() == "" { - return nil, errorsmod.Wrap(gaiaerrors.ErrNotFound, "globalfee param store is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrNotFound, "globalfee param store is required for AnteHandler") } if opts.StakingKeeper == nil { - return nil, errorsmod.Wrap(gaiaerrors.ErrNotFound, "staking param store is required for AnteHandler") + return nil, errorsmod.Wrap(atomoneerrors.ErrNotFound, "staking param store is required for AnteHandler") } sigGasConsumer := opts.SigGasConsumer @@ -62,7 +62,7 @@ func NewAnteHandler(opts HandlerOptions) (sdk.AnteHandler, error) { ante.NewValidateMemoDecorator(opts.AccountKeeper), ante.NewConsumeGasForTxSizeDecorator(opts.AccountKeeper), NewGovVoteDecorator(opts.Codec, opts.StakingKeeper), - gaiafeeante.NewFeeDecorator(opts.GlobalFeeSubspace, opts.StakingKeeper), + atomonefeeante.NewFeeDecorator(opts.GlobalFeeSubspace, opts.StakingKeeper), ante.NewDeductFeeDecorator(opts.AccountKeeper, opts.BankKeeper, opts.FeegrantKeeper, opts.TxFeeChecker), ante.NewSetPubKeyDecorator(opts.AccountKeeper), // SetPubKeyDecorator must be called before all signature verification decorators ante.NewValidateSigCountDecorator(opts.AccountKeeper), diff --git a/ante/gov_vote_ante.go b/ante/gov_vote_ante.go index abf6f930..cd8992e6 100644 --- a/ante/gov_vote_ante.go +++ b/ante/gov_vote_ante.go @@ -11,7 +11,7 @@ import ( stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - gaiaerrors "github.com/cosmos/gaia/v15/types/errors" + atomoneerrors "github.com/atomone-hub/atomone/types/errors" ) var ( @@ -104,7 +104,7 @@ func (g GovVoteDecorator) ValidateVoteMsgs(ctx sdk.Context, msgs []sdk.Msg) erro }) if !enoughStake { - return errorsmod.Wrapf(gaiaerrors.ErrInsufficientStake, "insufficient stake for voting - min required %v", minStakedTokens) + return 
errorsmod.Wrapf(atomoneerrors.ErrInsufficientStake, "insufficient stake for voting - min required %v", minStakedTokens) } return nil @@ -114,7 +114,7 @@ func (g GovVoteDecorator) ValidateVoteMsgs(ctx sdk.Context, msgs []sdk.Msg) erro for _, v := range execMsg.Msgs { var innerMsg sdk.Msg if err := g.cdc.UnpackAny(v, &innerMsg); err != nil { - return errorsmod.Wrap(gaiaerrors.ErrUnauthorized, "cannot unmarshal authz exec msgs") + return errorsmod.Wrap(atomoneerrors.ErrUnauthorized, "cannot unmarshal authz exec msgs") } if err := validMsg(innerMsg); err != nil { return err diff --git a/ante/gov_vote_ante_test.go b/ante/gov_vote_ante_test.go index da1345a3..7c5c70c1 100644 --- a/ante/gov_vote_ante_test.go +++ b/ante/gov_vote_ante_test.go @@ -15,17 +15,17 @@ import ( govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - "github.com/cosmos/gaia/v15/ante" - "github.com/cosmos/gaia/v15/app/helpers" + "github.com/atomone-hub/atomone/ante" + "github.com/atomone-hub/atomone/app/helpers" ) // Test that the GovVoteDecorator rejects v1beta1 vote messages from accounts with less than 1 atom staked // Submitting v1beta1.VoteMsg should not be possible through the CLI, but it's still possible to craft a transaction func TestVoteSpamDecoratorGovV1Beta1(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - decorator := ante.NewGovVoteDecorator(gaiaApp.AppCodec(), gaiaApp.StakingKeeper) - stakingKeeper := gaiaApp.StakingKeeper + atomoneApp := helpers.Setup(t) + ctx := atomoneApp.NewUncachedContext(true, tmproto.Header{}) + decorator := ante.NewGovVoteDecorator(atomoneApp.AppCodec(), atomoneApp.StakingKeeper) + stakingKeeper := atomoneApp.StakingKeeper // Get validator valAddr1 := stakingKeeper.GetAllValidators(ctx)[0].GetOperator() @@ -49,7 +49,7 @@ func TestVoteSpamDecoratorGovV1Beta1(t *testing.T) { require.NoError(t, err) // Get delegator (this account was created during setup) - addr := gaiaApp.AccountKeeper.GetAccountAddressByID(ctx, 0) + addr := atomoneApp.AccountKeeper.GetAccountAddressByID(ctx, 0) delegator, err := sdk.AccAddressFromBech32(addr) require.NoError(t, err) @@ -136,10 +136,10 @@ func TestVoteSpamDecoratorGovV1Beta1(t *testing.T) { // Test that the GovVoteDecorator rejects v1 vote messages from accounts with less than 1 atom staked // Usually, only v1.VoteMsg can be submitted using the CLI. 
func TestVoteSpamDecoratorGovV1(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - decorator := ante.NewGovVoteDecorator(gaiaApp.AppCodec(), gaiaApp.StakingKeeper) - stakingKeeper := gaiaApp.StakingKeeper + atomoneApp := helpers.Setup(t) + ctx := atomoneApp.NewUncachedContext(true, tmproto.Header{}) + decorator := ante.NewGovVoteDecorator(atomoneApp.AppCodec(), atomoneApp.StakingKeeper) + stakingKeeper := atomoneApp.StakingKeeper // Get validator valAddr1 := stakingKeeper.GetAllValidators(ctx)[0].GetOperator() @@ -163,7 +163,7 @@ func TestVoteSpamDecoratorGovV1(t *testing.T) { require.NoError(t, err) // Get delegator (this account was created during setup) - addr := gaiaApp.AccountKeeper.GetAccountAddressByID(ctx, 0) + addr := atomoneApp.AccountKeeper.GetAccountAddressByID(ctx, 0) delegator, err := sdk.AccAddressFromBech32(addr) require.NoError(t, err) diff --git a/app/app.go b/app/app.go index 9165aaeb..947a2f5d 100644 --- a/app/app.go +++ b/app/app.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( "fmt" @@ -51,31 +51,30 @@ import ( govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - gaiaante "github.com/cosmos/gaia/v15/ante" - "github.com/cosmos/gaia/v15/app/keepers" - "github.com/cosmos/gaia/v15/app/params" - "github.com/cosmos/gaia/v15/app/upgrades" - v15 "github.com/cosmos/gaia/v15/app/upgrades/v15" - "github.com/cosmos/gaia/v15/x/globalfee" + atomoneante "github.com/atomone-hub/atomone/ante" + "github.com/atomone-hub/atomone/app/keepers" + "github.com/atomone-hub/atomone/app/params" + "github.com/atomone-hub/atomone/app/upgrades" + "github.com/atomone-hub/atomone/x/globalfee" ) var ( // DefaultNodeHome default home directories for the application daemon DefaultNodeHome string - Upgrades = []upgrades.Upgrade{v15.Upgrade} + Upgrades = []upgrades.Upgrade{} ) var ( - _ runtime.AppI = (*GaiaApp)(nil) - _ servertypes.Application = (*GaiaApp)(nil) - _ ibctesting.TestingApp = (*GaiaApp)(nil) + _ runtime.AppI = (*AtomOneApp)(nil) + _ servertypes.Application = (*AtomOneApp)(nil) + _ ibctesting.TestingApp = (*AtomOneApp)(nil) ) -// GaiaApp extends an ABCI application, but with most of its parameters exported. +// AtomOneApp extends an ABCI application, but with most of its parameters exported. // They are exported for convenience in creating helper functions, as object // capabilities aren't needed for testing. -type GaiaApp struct { //nolint: revive +type AtomOneApp struct { //nolint: revive *baseapp.BaseApp keepers.AppKeepers @@ -99,11 +98,11 @@ func init() { panic(err) } - DefaultNodeHome = filepath.Join(userHomeDir, ".gaia") + DefaultNodeHome = filepath.Join(userHomeDir, ".atomone") } -// NewGaiaApp returns a reference to an initialized Gaia. -func NewGaiaApp( +// NewAtomOneApp returns a reference to an initialized AtomOne. 
+func NewAtomOneApp( logger log.Logger, db dbm.DB, traceStore io.Writer, @@ -113,7 +112,7 @@ func NewGaiaApp( encodingConfig params.EncodingConfig, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp), -) *GaiaApp { +) *AtomOneApp { appCodec := encodingConfig.Marshaler legacyAmino := encodingConfig.Amino interfaceRegistry := encodingConfig.InterfaceRegistry @@ -135,7 +134,7 @@ func NewGaiaApp( bApp.SetInterfaceRegistry(interfaceRegistry) bApp.SetTxEncoder(txConfig.TxEncoder()) - app := &GaiaApp{ + app := &AtomOneApp{ BaseApp: bApp, legacyAmino: legacyAmino, txConfig: txConfig, @@ -214,8 +213,8 @@ func NewGaiaApp( app.MountTransientStores(app.GetTransientStoreKey()) app.MountMemoryStores(app.GetMemoryStoreKey()) - anteHandler, err := gaiaante.NewAnteHandler( - gaiaante.HandlerOptions{ + anteHandler, err := atomoneante.NewAnteHandler( + atomoneante.HandlerOptions{ HandlerOptions: ante.HandlerOptions{ AccountKeeper: app.AccountKeeper, BankKeeper: app.BankKeeper, @@ -254,20 +253,20 @@ func NewGaiaApp( } // Name returns the name of the App -func (app *GaiaApp) Name() string { return app.BaseApp.Name() } +func (app *AtomOneApp) Name() string { return app.BaseApp.Name() } // BeginBlocker application updates every begin block -func (app *GaiaApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { +func (app *AtomOneApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { return app.mm.BeginBlock(ctx, req) } // EndBlocker application updates every end block -func (app *GaiaApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { +func (app *AtomOneApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { return app.mm.EndBlock(ctx, req) } // InitChainer application update at chain initialization -func (app *GaiaApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { +func (app *AtomOneApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { var genesisState GenesisState if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { panic(err) @@ -279,12 +278,12 @@ func (app *GaiaApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci } // LoadHeight loads a particular height -func (app *GaiaApp) LoadHeight(height int64) error { +func (app *AtomOneApp) LoadHeight(height int64) error { return app.LoadVersion(height) } // ModuleAccountAddrs returns all the app's module account addresses. -func (app *GaiaApp) ModuleAccountAddrs() map[string]bool { +func (app *AtomOneApp) ModuleAccountAddrs() map[string]bool { modAccAddrs := make(map[string]bool) for acc := range maccPerms { modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true @@ -295,7 +294,7 @@ func (app *GaiaApp) ModuleAccountAddrs() map[string]bool { // BlockedModuleAccountAddrs returns all the app's blocked module account // addresses. -func (app *GaiaApp) BlockedModuleAccountAddrs(modAccAddrs map[string]bool) map[string]bool { +func (app *AtomOneApp) BlockedModuleAccountAddrs(modAccAddrs map[string]bool) map[string]bool { // remove module accounts that are ALLOWED to received funds delete(modAccAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String()) @@ -305,35 +304,35 @@ func (app *GaiaApp) BlockedModuleAccountAddrs(modAccAddrs map[string]bool) map[s return modAccAddrs } -// LegacyAmino returns GaiaApp's amino codec. +// LegacyAmino returns AtomOneApp's amino codec. 
// // NOTE: This is solely to be used for testing purposes as it may be desirable // for modules to register their own custom testing types. -func (app *GaiaApp) LegacyAmino() *codec.LegacyAmino { +func (app *AtomOneApp) LegacyAmino() *codec.LegacyAmino { return app.legacyAmino } -// AppCodec returns Gaia's app codec. +// AppCodec returns AtomOne's app codec. // // NOTE: This is solely to be used for testing purposes as it may be desirable // for modules to register their own custom testing types. -func (app *GaiaApp) AppCodec() codec.Codec { +func (app *AtomOneApp) AppCodec() codec.Codec { return app.appCodec } -// InterfaceRegistry returns Gaia's InterfaceRegistry -func (app *GaiaApp) InterfaceRegistry() types.InterfaceRegistry { +// InterfaceRegistry returns AtomOne's InterfaceRegistry +func (app *AtomOneApp) InterfaceRegistry() types.InterfaceRegistry { return app.interfaceRegistry } // SimulationManager implements the SimulationApp interface -func (app *GaiaApp) SimulationManager() *module.SimulationManager { +func (app *AtomOneApp) SimulationManager() *module.SimulationManager { return app.sm } // RegisterAPIRoutes registers all application module routes with the provided // API server. -func (app *GaiaApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) { +func (app *AtomOneApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) { clientCtx := apiSvr.ClientCtx // Register new tx routes from grpc-gateway. authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) @@ -353,17 +352,17 @@ func (app *GaiaApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APICo } // RegisterTxService allows query minimum-gas-prices in app.toml -func (app *GaiaApp) RegisterNodeService(clientCtx client.Context) { +func (app *AtomOneApp) RegisterNodeService(clientCtx client.Context) { nodeservice.RegisterNodeService(clientCtx, app.GRPCQueryRouter()) } // RegisterTxService implements the Application.RegisterTxService method. -func (app *GaiaApp) RegisterTxService(clientCtx client.Context) { +func (app *AtomOneApp) RegisterTxService(clientCtx client.Context) { authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry) } // RegisterTendermintService implements the Application.RegisterTendermintService method. 
-func (app *GaiaApp) RegisterTendermintService(clientCtx client.Context) { +func (app *AtomOneApp) RegisterTendermintService(clientCtx client.Context) { tmservice.RegisterTendermintService( clientCtx, app.BaseApp.GRPCQueryRouter(), @@ -373,7 +372,7 @@ func (app *GaiaApp) RegisterTendermintService(clientCtx client.Context) { } // configure store loader that checks if version == upgradeHeight and applies store upgrades -func (app *GaiaApp) setupUpgradeStoreLoaders() { +func (app *AtomOneApp) setupUpgradeStoreLoaders() { upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() if err != nil { panic(fmt.Sprintf("failed to read upgrade info from disk %s", err)) @@ -392,7 +391,7 @@ func (app *GaiaApp) setupUpgradeStoreLoaders() { } } -func (app *GaiaApp) setupUpgradeHandlers() { +func (app *AtomOneApp) setupUpgradeHandlers() { for _, upgrade := range Upgrades { app.UpgradeKeeper.SetUpgradeHandler( upgrade.UpgradeName, @@ -416,21 +415,21 @@ func RegisterSwaggerAPI(rtr *mux.Router) { rtr.PathPrefix("/swagger/").Handler(http.StripPrefix("/swagger/", staticServer)) } -func (app *GaiaApp) OnTxSucceeded(_ sdk.Context, _, _ string, _ []byte, _ []byte) { +func (app *AtomOneApp) OnTxSucceeded(_ sdk.Context, _, _ string, _ []byte, _ []byte) { } -func (app *GaiaApp) OnTxFailed(_ sdk.Context, _, _ string, _ []byte, _ []byte) { +func (app *AtomOneApp) OnTxFailed(_ sdk.Context, _, _ string, _ []byte, _ []byte) { } // TestingApp functions // GetBaseApp implements the TestingApp interface. -func (app *GaiaApp) GetBaseApp() *baseapp.BaseApp { +func (app *AtomOneApp) GetBaseApp() *baseapp.BaseApp { return app.BaseApp } // GetTxConfig implements the TestingApp interface. -func (app *GaiaApp) GetTxConfig() client.TxConfig { +func (app *AtomOneApp) GetTxConfig() client.TxConfig { return app.txConfig } diff --git a/app/app_helpers.go b/app/app_helpers.go index 68eaef14..0343f6f9 100644 --- a/app/app_helpers.go +++ b/app/app_helpers.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper" @@ -12,45 +12,45 @@ import ( // ProviderApp interface implementations for icstest tests // GetProviderKeeper implements the ProviderApp interface. -func (app *GaiaApp) GetProviderKeeper() ibcproviderkeeper.Keeper { //nolint:nolintlint +func (app *AtomOneApp) GetProviderKeeper() ibcproviderkeeper.Keeper { //nolint:nolintlint return app.ProviderKeeper } // GetStakingKeeper implements the TestingApp interface. Needed for ICS. -func (app *GaiaApp) GetStakingKeeper() ibctestingtypes.StakingKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetStakingKeeper() ibctestingtypes.StakingKeeper { //nolint:nolintlint return app.StakingKeeper } // GetIBCKeeper implements the TestingApp interface. -func (app *GaiaApp) GetIBCKeeper() *ibckeeper.Keeper { //nolint:nolintlint +func (app *AtomOneApp) GetIBCKeeper() *ibckeeper.Keeper { //nolint:nolintlint return app.IBCKeeper } // GetScopedIBCKeeper implements the TestingApp interface. -func (app *GaiaApp) GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper { //nolint:nolintlint return app.ScopedIBCKeeper } // GetTestStakingKeeper implements the ProviderApp interface. -func (app *GaiaApp) GetTestStakingKeeper() icstest.TestStakingKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetTestStakingKeeper() icstest.TestStakingKeeper { //nolint:nolintlint return app.StakingKeeper } // GetTestBankKeeper implements the ProviderApp interface. 
-func (app *GaiaApp) GetTestBankKeeper() icstest.TestBankKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetTestBankKeeper() icstest.TestBankKeeper { //nolint:nolintlint return app.BankKeeper } // GetTestSlashingKeeper implements the ProviderApp interface. -func (app *GaiaApp) GetTestSlashingKeeper() icstest.TestSlashingKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetTestSlashingKeeper() icstest.TestSlashingKeeper { //nolint:nolintlint return app.SlashingKeeper } // GetTestDistributionKeeper implements the ProviderApp interface. -func (app *GaiaApp) GetTestDistributionKeeper() icstest.TestDistributionKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetTestDistributionKeeper() icstest.TestDistributionKeeper { //nolint:nolintlint return app.DistrKeeper } -func (app *GaiaApp) GetTestAccountKeeper() icstest.TestAccountKeeper { //nolint:nolintlint +func (app *AtomOneApp) GetTestAccountKeeper() icstest.TestAccountKeeper { //nolint:nolintlint return app.AccountKeeper } diff --git a/app/app_test.go b/app/app_test.go index 846d2493..eb624160 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,4 +1,4 @@ -package gaia_test +package atomone_test import ( "testing" @@ -11,8 +11,8 @@ import ( authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - gaia "github.com/cosmos/gaia/v15/app" - gaiahelpers "github.com/cosmos/gaia/v15/app/helpers" + atomone "github.com/atomone-hub/atomone/app" + atomonehelpers "github.com/atomone-hub/atomone/app/helpers" ) type EmptyAppOptions struct{} @@ -21,15 +21,15 @@ func (ao EmptyAppOptions) Get(_ string) interface{} { return nil } -func TestGaiaApp_BlockedModuleAccountAddrs(t *testing.T) { - encConfig := gaia.RegisterEncodingConfig() - app := gaia.NewGaiaApp( +func TestAtomOneApp_BlockedModuleAccountAddrs(t *testing.T) { + encConfig := atomone.RegisterEncodingConfig() + app := atomone.NewAtomOneApp( log.NewNopLogger(), db.NewMemDB(), nil, true, map[int64]bool{}, - gaia.DefaultNodeHome, + atomone.DefaultNodeHome, encConfig, EmptyAppOptions{}, ) @@ -40,8 +40,8 @@ func TestGaiaApp_BlockedModuleAccountAddrs(t *testing.T) { require.NotContains(t, blockedAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String()) } -func TestGaiaApp_Export(t *testing.T) { - app := gaiahelpers.Setup(t) +func TestAtomOneApp_Export(t *testing.T) { + app := atomonehelpers.Setup(t) _, err := app.ExportAppStateAndValidators(true, []string{}, []string{}) require.NoError(t, err, "ExportAppStateAndValidators should not have an error") } diff --git a/app/const.go b/app/const.go index 73af2354..156ac154 100644 --- a/app/const.go +++ b/app/const.go @@ -1,5 +1,5 @@ -package gaia +package atomone const ( - appName = "GaiaApp" + appName = "AtomOneApp" ) diff --git a/app/encoding.go b/app/encoding.go index a6daee49..99f84bee 100644 --- a/app/encoding.go +++ b/app/encoding.go @@ -1,9 +1,9 @@ -package gaia +package atomone import ( "github.com/cosmos/cosmos-sdk/std" - "github.com/cosmos/gaia/v15/app/params" + "github.com/atomone-hub/atomone/app/params" ) func RegisterEncodingConfig() params.EncodingConfig { diff --git a/app/export.go b/app/export.go index e8423cdd..dd73751f 100644 --- a/app/export.go +++ b/app/export.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( "encoding/json" @@ -14,7 +14,7 @@ import ( // ExportAppStateAndValidators exports the state of the application for a genesis // file. 
-func (app *GaiaApp) ExportAppStateAndValidators( +func (app *AtomOneApp) ExportAppStateAndValidators( forZeroHeight bool, jailAllowedAddrs []string, modulesToExport []string, @@ -48,7 +48,7 @@ func (app *GaiaApp) ExportAppStateAndValidators( // prepare for fresh start at zero height // NOTE zero height genesis is a temporary feature which will be deprecated // in favour of export at a block height -func (app *GaiaApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { +func (app *AtomOneApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { applyAllowedAddrs := false // check if there is a allowed address list diff --git a/app/genesis.go b/app/genesis.go index 6c7a804a..9b5270e8 100644 --- a/app/genesis.go +++ b/app/genesis.go @@ -1,9 +1,9 @@ -package gaia +package atomone import ( "encoding/json" - "github.com/cosmos/gaia/v15/app/params" + "github.com/atomone-hub/atomone/app/params" ) // The genesis state of the blockchain is represented here as a map of raw json diff --git a/app/genesis_account.go b/app/genesis_account.go index 0c170ee5..eeecc3c6 100644 --- a/app/genesis_account.go +++ b/app/genesis_account.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( "errors" diff --git a/app/genesis_account_fuzz_test.go b/app/genesis_account_fuzz_test.go index 79153cec..58412f03 100644 --- a/app/genesis_account_fuzz_test.go +++ b/app/genesis_account_fuzz_test.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( "runtime/debug" diff --git a/app/helpers/test_helpers.go b/app/helpers/test_helpers.go index 9ac54fcc..822e7aeb 100644 --- a/app/helpers/test_helpers.go +++ b/app/helpers/test_helpers.go @@ -24,16 +24,16 @@ import ( banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - gaiaapp "github.com/cosmos/gaia/v15/app" + atomoneapp "github.com/atomone-hub/atomone/app" ) // SimAppChainID hardcoded chainID for simulation const ( - SimAppChainID = "gaia-app" + SimAppChainID = "atomone-app" ) // DefaultConsensusParams defines the default Tendermint consensus params used -// in GaiaApp testing. +// in AtomOneApp testing. var DefaultConsensusParams = &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 200000, @@ -59,7 +59,7 @@ type EmptyAppOptions struct{} func (EmptyAppOptions) Get(_ string) interface{} { return nil } -func Setup(t *testing.T) *gaiaapp.GaiaApp { +func Setup(t *testing.T) *atomoneapp.AtomOneApp { t.Helper() privVal := mock.NewPV() @@ -84,21 +84,21 @@ func Setup(t *testing.T) *gaiaapp.GaiaApp { return app } -// SetupWithGenesisValSet initializes a new GaiaApp with a validator set and genesis accounts +// SetupWithGenesisValSet initializes a new AtomOneApp with a validator set and genesis accounts // that also act as delegators. For simplicity, each validator is bonded with a delegation -// of one consensus engine unit in the default token of the GaiaApp from first genesis -// account. A Nop logger is set in GaiaApp. -func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *gaiaapp.GaiaApp { +// of one consensus engine unit in the default token of the AtomOneApp from first genesis +// account. A Nop logger is set in AtomOneApp. 
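As a usage illustration for the helper documented above — a hypothetical test, assuming the post-rename `app/helpers` import path — one mock validator and one funded genesis account are enough to get a chain that can run `BeginBlock`/`EndBlock` in tests:

```go
package helpers_test

import (
	"testing"

	tmtypes "github.com/cometbft/cometbft/types"
	"github.com/cosmos/cosmos-sdk/testutil/mock"
	sdk "github.com/cosmos/cosmos-sdk/types"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
	"github.com/stretchr/testify/require"

	"github.com/atomone-hub/atomone/app/helpers"
)

func TestSetupWithGenesisValSet(t *testing.T) {
	// Single mock validator backing the genesis validator set.
	privVal := mock.NewPV()
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)

	validator := tmtypes.NewValidator(pubKey, 1)
	valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{validator})

	// One genesis account funded with enough uatom to bond the validator.
	acc := authtypes.NewBaseAccount(sdk.AccAddress(pubKey.Address()), nil, 0, 0)
	balance := banktypes.Balance{
		Address: acc.GetAddress().String(),
		Coins:   sdk.NewCoins(sdk.NewInt64Coin("uatom", 100_000_000)),
	}

	app := helpers.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
	require.NotNil(t, app)
}
```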
+func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *atomoneapp.AtomOneApp { t.Helper() - gaiaApp, genesisState := setup() - genesisState = genesisStateWithValSet(t, gaiaApp, genesisState, valSet, genAccs, balances...) + atomoneApp, genesisState := setup() + genesisState = genesisStateWithValSet(t, atomoneApp, genesisState, valSet, genAccs, balances...) stateBytes, err := json.MarshalIndent(genesisState, "", " ") require.NoError(t, err) // init chain will set the validator set and initialize the genesis accounts - gaiaApp.InitChain( + atomoneApp.InitChain( abci.RequestInitChain{ Validators: []abci.ValidatorUpdate{}, ConsensusParams: DefaultConsensusParams, @@ -107,43 +107,43 @@ func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs ) // commit genesis changes - gaiaApp.Commit() - gaiaApp.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{ - Height: gaiaApp.LastBlockHeight() + 1, - AppHash: gaiaApp.LastCommitID().Hash, + atomoneApp.Commit() + atomoneApp.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{ + Height: atomoneApp.LastBlockHeight() + 1, + AppHash: atomoneApp.LastCommitID().Hash, ValidatorsHash: valSet.Hash(), NextValidatorsHash: valSet.Hash(), }}) - return gaiaApp + return atomoneApp } -func setup() (*gaiaapp.GaiaApp, gaiaapp.GenesisState) { +func setup() (*atomoneapp.AtomOneApp, atomoneapp.GenesisState) { db := dbm.NewMemDB() appOptions := make(simtestutil.AppOptionsMap, 0) appOptions[server.FlagInvCheckPeriod] = 5 appOptions[server.FlagMinGasPrices] = "0uatom" - encConfig := gaiaapp.RegisterEncodingConfig() + encConfig := atomoneapp.RegisterEncodingConfig() - gaiaApp := gaiaapp.NewGaiaApp( + atomoneApp := atomoneapp.NewAtomOneApp( log.NewNopLogger(), db, nil, true, map[int64]bool{}, - gaiaapp.DefaultNodeHome, + atomoneapp.DefaultNodeHome, encConfig, appOptions, ) - return gaiaApp, gaiaapp.NewDefaultGenesisState(encConfig) + return atomoneApp, atomoneapp.NewDefaultGenesisState(encConfig) } func genesisStateWithValSet(t *testing.T, - app *gaiaapp.GaiaApp, genesisState gaiaapp.GenesisState, + app *atomoneapp.AtomOneApp, genesisState atomoneapp.GenesisState, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance, -) gaiaapp.GenesisState { +) atomoneapp.GenesisState { t.Helper() // set genesis accounts authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), genAccs) diff --git a/app/keepers/keepers.go b/app/keepers/keepers.go index e24292cc..3bbd2b18 100644 --- a/app/keepers/keepers.go +++ b/app/keepers/keepers.go @@ -68,7 +68,7 @@ import ( upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - "github.com/cosmos/gaia/v15/x/globalfee" + "github.com/atomone-hub/atomone/x/globalfee" ) type AppKeepers struct { diff --git a/app/modules.go b/app/modules.go index cecdf1a7..04fad902 100644 --- a/app/modules.go +++ b/app/modules.go @@ -1,4 +1,4 @@ -package gaia +package atomone import ( pfmrouter "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward" @@ -55,10 +55,10 @@ import ( upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - gaiaappparams "github.com/cosmos/gaia/v15/app/params" - "github.com/cosmos/gaia/v15/x/globalfee" - "github.com/cosmos/gaia/v15/x/metaprotocols" - metaprotocolstypes "github.com/cosmos/gaia/v15/x/metaprotocols/types" + 
atomoneappparams "github.com/atomone-hub/atomone/app/params" + "github.com/atomone-hub/atomone/x/globalfee" + "github.com/atomone-hub/atomone/x/metaprotocols" + metaprotocolstypes "github.com/atomone-hub/atomone/x/metaprotocols/types" ) var maccPerms = map[string][]string{ @@ -117,8 +117,8 @@ var ModuleBasics = module.NewBasicManager( ) func appModules( - app *GaiaApp, - encodingConfig gaiaappparams.EncodingConfig, + app *AtomOneApp, + encodingConfig atomoneappparams.EncodingConfig, skipGenesisInvariants bool, ) []module.AppModule { appCodec := encodingConfig.Marshaler @@ -159,8 +159,8 @@ func appModules( // simulationModules returns modules for simulation manager // define the order of the modules for deterministic simulations func simulationModules( - app *GaiaApp, - encodingConfig gaiaappparams.EncodingConfig, + app *AtomOneApp, + encodingConfig atomoneappparams.EncodingConfig, _ bool, ) []module.AppModuleSimulation { appCodec := encodingConfig.Marshaler diff --git a/app/params/doc.go b/app/params/doc.go index 49b5f6d1..7edf586d 100644 --- a/app/params/doc.go +++ b/app/params/doc.go @@ -1,5 +1,5 @@ /* -Package params defines the simulation parameters in the gaia. +Package params defines the simulation parameters in the atomone. It contains the default weights used for each transaction used on the module's simulation. These weights define the chance for a transaction to be simulated at diff --git a/app/sim/sim_state.go b/app/sim/sim_state.go index 31a52668..b1485270 100644 --- a/app/sim/sim_state.go +++ b/app/sim/sim_state.go @@ -22,8 +22,8 @@ import ( banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - gaia "github.com/cosmos/gaia/v15/app" - "github.com/cosmos/gaia/v15/app/params" + atomone "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/app/params" ) // Simulation parameter constants @@ -158,7 +158,7 @@ func AppStateRandomizedFn( ) (json.RawMessage, []simtypes.Account) { numAccs := int64(len(accs)) cdc := encConfig.Marshaler - genesisState := gaia.NewDefaultGenesisState(encConfig) + genesisState := atomone.NewDefaultGenesisState(encConfig) // generate a random amount of initial stake coins and a random initial // number of bonded accounts @@ -225,7 +225,7 @@ func AppStateFromGenesisFileFn(r io.Reader, cdc codec.JSONCodec, genesisFile str panic(err) } - var appState gaia.GenesisState + var appState atomone.GenesisState err = json.Unmarshal(genesis.AppState, &appState) if err != nil { panic(err) diff --git a/app/sim/sim_utils.go b/app/sim/sim_utils.go index 6499ca82..958807be 100644 --- a/app/sim/sim_utils.go +++ b/app/sim/sim_utils.go @@ -12,12 +12,12 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - gaia "github.com/cosmos/gaia/v15/app" + atomone "github.com/atomone-hub/atomone/app" ) // SimulationOperations retrieves the simulation params from the provided file path // and returns all the modules weighted operations -func SimulationOperations(app *gaia.GaiaApp, cdc codec.JSONCodec, config simtypes.Config) []simtypes.WeightedOperation { +func SimulationOperations(app *atomone.AtomOneApp, cdc codec.JSONCodec, config simtypes.Config) []simtypes.WeightedOperation { simState := module.SimulationState{ AppParams: make(simtypes.AppParams), Cdc: cdc, diff --git a/app/sim_bench_test.go b/app/sim_bench_test.go index eb0aad65..651a772d 100644 --- a/app/sim_bench_test.go +++ b/app/sim_bench_test.go @@ -1,4 +1,4 @@ -package gaia_test +package 
atomone_test import ( "os" @@ -13,12 +13,12 @@ import ( "github.com/cosmos/cosmos-sdk/x/simulation" simcli "github.com/cosmos/cosmos-sdk/x/simulation/client/cli" - gaia "github.com/cosmos/gaia/v15/app" - "github.com/cosmos/gaia/v15/app/sim" + atomone "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/app/sim" ) // Profile with: -// /usr/local/go/bin/go test -benchmem -run=^$ github.com/cosmos/cosmos-sdk/GaiaApp -bench ^BenchmarkFullAppSimulation$ -Commit=true -cpuprofile cpu.out +// /usr/local/go/bin/go test -benchmem -run=^$ github.com/cosmos/cosmos-sdk/AtomOneApp -bench ^BenchmarkFullAppSimulation$ -Commit=true -cpuprofile cpu.out func BenchmarkFullAppSimulation(b *testing.B) { b.ReportAllocs() @@ -42,15 +42,15 @@ func BenchmarkFullAppSimulation(b *testing.B) { appOptions := make(simtestutil.AppOptionsMap, 0) appOptions[server.FlagInvCheckPeriod] = simcli.FlagPeriodValue - encConfig := gaia.RegisterEncodingConfig() + encConfig := atomone.RegisterEncodingConfig() - app := gaia.NewGaiaApp( + app := atomone.NewAtomOneApp( logger, db, nil, true, map[int64]bool{}, - gaia.DefaultNodeHome, + atomone.DefaultNodeHome, encConfig, appOptions, interBlockCacheOpt(), diff --git a/app/sim_test.go b/app/sim_test.go index 7c7fa8d2..3e679843 100644 --- a/app/sim_test.go +++ b/app/sim_test.go @@ -1,4 +1,4 @@ -package gaia_test +package atomone_test import ( "encoding/json" @@ -23,15 +23,13 @@ import ( "github.com/cosmos/cosmos-sdk/x/simulation" simcli "github.com/cosmos/cosmos-sdk/x/simulation/client/cli" - "github.com/cosmos/gaia/v15/ante" - gaia "github.com/cosmos/gaia/v15/app" - // "github.com/cosmos/gaia/v11/app/helpers" - // "github.com/cosmos/gaia/v11/app/params" - "github.com/cosmos/gaia/v15/app/sim" + "github.com/atomone-hub/atomone/ante" + atomone "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/app/sim" ) // AppChainID hardcoded chainID for simulation -const AppChainID = "gaia-app" +const AppChainID = "atomone-app" func init() { sim.GetSimulatorFlags() @@ -67,7 +65,7 @@ func TestAppStateDeterminism(t *testing.T) { appHashList := make([]json.RawMessage, numTimesToRunPerSeed) appOptions := make(simtestutil.AppOptionsMap, 0) - appOptions[flags.FlagHome] = gaia.DefaultNodeHome + appOptions[flags.FlagHome] = atomone.DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = sim.FlagPeriodValue for i := 0; i < numSeeds; i++ { @@ -86,14 +84,14 @@ func TestAppStateDeterminism(t *testing.T) { } db := dbm.NewMemDB() - encConfig := gaia.RegisterEncodingConfig() - app := gaia.NewGaiaApp( + encConfig := atomone.RegisterEncodingConfig() + app := atomone.NewAtomOneApp( logger, db, nil, true, map[int64]bool{}, - gaia.DefaultNodeHome, + atomone.DefaultNodeHome, encConfig, appOptions, interBlockCacheOpt(), @@ -115,7 +113,7 @@ func TestAppStateDeterminism(t *testing.T) { t, os.Stdout, app.BaseApp, - simtestutil.AppStateFn(app.AppCodec(), app.SimulationManager(), gaia.NewDefaultGenesisState(encConfig)), + simtestutil.AppStateFn(app.AppCodec(), app.SimulationManager(), atomone.NewDefaultGenesisState(encConfig)), simulation2.RandomAccounts, // Replace with own random account function if using keys other than secp256k1 simtestutil.SimulationOperations(app, app.AppCodec(), config), blockedAddresses, diff --git a/app/upgrades/types.go b/app/upgrades/types.go index 06ba7265..223b076b 100644 --- a/app/upgrades/types.go +++ b/app/upgrades/types.go @@ -6,7 +6,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - 
"github.com/cosmos/gaia/v15/app/keepers" + "github.com/atomone-hub/atomone/app/keepers" ) // Upgrade defines a struct containing necessary fields that a SoftwareUpgradeProposal diff --git a/app/upgrades/v10/constants.go b/app/upgrades/v10/constants.go deleted file mode 100644 index 04cfab5f..00000000 --- a/app/upgrades/v10/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package v10 - -import ( - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. - UpgradeName = "v10" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, -} diff --git a/app/upgrades/v10/upgrades.go b/app/upgrades/v10/upgrades.go deleted file mode 100644 index 3e362c13..00000000 --- a/app/upgrades/v10/upgrades.go +++ /dev/null @@ -1,27 +0,0 @@ -package v10 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/app/upgrades/v11/constants.go b/app/upgrades/v11/constants.go deleted file mode 100644 index e9e3685c..00000000 --- a/app/upgrades/v11/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package v11 - -import ( - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. - UpgradeName = "v11" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, -} diff --git a/app/upgrades/v11/upgrades.go b/app/upgrades/v11/upgrades.go deleted file mode 100644 index ff238011..00000000 --- a/app/upgrades/v11/upgrades.go +++ /dev/null @@ -1,27 +0,0 @@ -package v11 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/app/upgrades/v12/constants.go b/app/upgrades/v12/constants.go deleted file mode 100644 index 5f255c84..00000000 --- a/app/upgrades/v12/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package v12 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. - UpgradeName = "v12" -) - -var ( - // The ValidatorBondFactor dictates the cap on the liquid shares - // for a validator - determined as a multiple to their validator bond - // (e.g. 
ValidatorBondShares = 1000, BondFactor = 250 -> LiquidSharesCap: 250,000) - ValidatorBondFactor = sdk.NewDec(250) - // GlobalLiquidStakingCap represents a cap on the portion of stake that - // comes from liquid staking providers for a specific validator - ValidatorLiquidStakingCap = sdk.MustNewDecFromStr("0.5") // 50% - // GlobalLiquidStakingCap represents the percentage cap on - // the portion of a chain's total stake can be liquid - GlobalLiquidStakingCap = sdk.MustNewDecFromStr("0.25") // 25% -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, -} diff --git a/app/upgrades/v12/upgrades.go b/app/upgrades/v12/upgrades.go deleted file mode 100644 index f108d819..00000000 --- a/app/upgrades/v12/upgrades.go +++ /dev/null @@ -1,38 +0,0 @@ -package v12 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - // Set liquid staking module parameters - params := keepers.StakingKeeper.GetParams(ctx) - params.ValidatorBondFactor = ValidatorBondFactor - params.ValidatorLiquidStakingCap = ValidatorLiquidStakingCap - params.GlobalLiquidStakingCap = GlobalLiquidStakingCap - - err = keepers.StakingKeeper.SetParams(ctx, params) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, nil - } -} diff --git a/app/upgrades/v13/constants.go b/app/upgrades/v13/constants.go deleted file mode 100644 index dce0714f..00000000 --- a/app/upgrades/v13/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package v13 - -import ( - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. - UpgradeName = "v13" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, -} diff --git a/app/upgrades/v13/upgrades.go b/app/upgrades/v13/upgrades.go deleted file mode 100644 index ce2cdb6c..00000000 --- a/app/upgrades/v13/upgrades.go +++ /dev/null @@ -1,27 +0,0 @@ -package v13 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/app/upgrades/v14/constants.go b/app/upgrades/v14/constants.go deleted file mode 100644 index 72653142..00000000 --- a/app/upgrades/v14/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package v14 - -import ( - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. 
- UpgradeName = "v14" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, -} diff --git a/app/upgrades/v14/upgrades.go b/app/upgrades/v14/upgrades.go deleted file mode 100644 index 9dfc1b0f..00000000 --- a/app/upgrades/v14/upgrades.go +++ /dev/null @@ -1,32 +0,0 @@ -package v14 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - // Set the minimum height of a valid consumer equivocation evidence - // for the existing consumer chains: neutron-1 and stride-1 - keepers.ProviderKeeper.SetEquivocationEvidenceMinHeight(ctx, "neutron-1", 4552189) - keepers.ProviderKeeper.SetEquivocationEvidenceMinHeight(ctx, "stride-1", 6375035) - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/app/upgrades/v15/constants.go b/app/upgrades/v15/constants.go deleted file mode 100644 index 45182937..00000000 --- a/app/upgrades/v15/constants.go +++ /dev/null @@ -1,26 +0,0 @@ -package v15 - -import ( - store "github.com/cosmos/cosmos-sdk/store/types" - consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types" - crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" - - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. 
- UpgradeName = "v15" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, - StoreUpgrades: store.StoreUpgrades{ - Added: []string{ - // v47 modules - crisistypes.ModuleName, - consensustypes.ModuleName, - }, - }, -} diff --git a/app/upgrades/v15/upgrades.go b/app/upgrades/v15/upgrades.go deleted file mode 100644 index ebe18daf..00000000 --- a/app/upgrades/v15/upgrades.go +++ /dev/null @@ -1,458 +0,0 @@ -package v15 - -import ( - "fmt" - - ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper" - ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/store/prefix" - storetypes "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/address" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/types/module" - accountkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" - bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - distributionkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" - distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" - govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" - slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" - slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" - stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -// CreateUpgradeHandler returns a upgrade handler for Gaia v15 -// which executes the following migrations: -// - adhere to prop 826 which sets the minimum commission rate to 5% for all validators, -// see https://www.mintscan.io/cosmos/proposals/826 -// - update the slashing module SigningInfos for which the consensus address is empty, -// see https://github.com/cosmos/gaia/issues/1734. -// - adhere to signal prop 860 which claws back vesting funds -// see https://www.mintscan.io/cosmos/proposals/860 -// - update the transfer module's escrow accounts for which there is a discrepancy -// with the counterparty chain supply. -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - baseAppLegacySS := keepers.ParamsKeeper.Subspace(baseapp.Paramspace). 
- WithKeyTable(paramstypes.ConsensusParamsKeyTable()) - baseapp.MigrateParams(ctx, baseAppLegacySS, &keepers.ConsensusParamsKeeper) - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - if err := UpgradeMinCommissionRate(ctx, *keepers.StakingKeeper); err != nil { - return nil, fmt.Errorf("failed migrating min commission rates: %s", err) - } - - UpgradeSigningInfos(ctx, keepers.SlashingKeeper) - - if err := ClawbackVestingFunds( - ctx, - sdk.MustAccAddressFromBech32("cosmos145hytrc49m0hn6fphp8d5h4xspwkawcuzmx498"), - keepers); err != nil { - return nil, fmt.Errorf("failed migrating vesting funds: %s", err) - } - if err := SetMinInitialDepositRatio(ctx, *keepers.GovKeeper); err != nil { - return nil, fmt.Errorf("failed initializing the min initial deposit ratio: %s", err) - } - - UpgradeEscrowAccounts(ctx, keepers.BankKeeper, keepers.TransferKeeper) - - ctx.Logger().Info("Upgrade v15 complete") - return vm, err - } -} - -// UpgradeMinCommissionRate sets the minimum commission rate staking parameter to 5% -// and updates the commission rate for all validators that have a commission rate less than 5% -// adhere to prop 826 which sets the minimum commission rate to 5% for all validators -// https://www.mintscan.io/cosmos/proposals/826 -func UpgradeMinCommissionRate(ctx sdk.Context, sk stakingkeeper.Keeper) error { - ctx.Logger().Info("Migrating min commission rate...") - - params := sk.GetParams(ctx) - params.MinCommissionRate = sdk.NewDecWithPrec(5, 2) - if err := sk.SetParams(ctx, params); err != nil { - return err - } - - for _, val := range sk.GetAllValidators(ctx) { - if val.Commission.CommissionRates.Rate.LT(sdk.NewDecWithPrec(5, 2)) { - // set the commission rate to 5% - val.Commission.CommissionRates.Rate = sdk.NewDecWithPrec(5, 2) - // set the max rate to 5% if it is less than 5% - if val.Commission.CommissionRates.MaxRate.LT(sdk.NewDecWithPrec(5, 2)) { - val.Commission.CommissionRates.MaxRate = sdk.NewDecWithPrec(5, 2) - } - val.Commission.UpdateTime = ctx.BlockHeader().Time - sk.SetValidator(ctx, val) - } - } - - ctx.Logger().Info("Finished migrating min commission rate") - return nil -} - -// UpgradeSigningInfos updates the signing infos of validators for which -// the consensus address is missing -func UpgradeSigningInfos(ctx sdk.Context, sk slashingkeeper.Keeper) { - ctx.Logger().Info("Migrating signing infos...") - - signingInfos := []slashingtypes.ValidatorSigningInfo{} - - // update consensus address in signing info - // using the store key of validators - sk.IterateValidatorSigningInfos(ctx, func(address sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { - if info.Address == "" { - info.Address = address.String() - signingInfos = append(signingInfos, info) - } - - return false - }) - - for _, si := range signingInfos { - addr, err := sdk.ConsAddressFromBech32(si.Address) - if err != nil { - ctx.Logger().Error("incorrect consensus address in signing info %s: %s", si.Address, err) - continue - } - sk.SetValidatorSigningInfo(ctx, addr, si) - } - - ctx.Logger().Info("Finished migrating signing infos") -} - -// ClawbackVestingFunds transfers the vesting tokens from the given vesting account -// to the community pool -func ClawbackVestingFunds(ctx sdk.Context, address sdk.AccAddress, keepers *keepers.AppKeepers) error { - ctx.Logger().Info("Migrating vesting funds...") - - ak := keepers.AccountKeeper - bk := keepers.BankKeeper - dk := keepers.DistrKeeper - sk := *keepers.StakingKeeper - - // get target account - 
account := ak.GetAccount(ctx, address) - - // verify that it's a vesting account type - vestAccount, ok := account.(*vesting.ContinuousVestingAccount) - if !ok { - ctx.Logger().Error( - "failed migrating vesting funds: %s: %s", - "provided account address isn't a vesting account: ", - address.String(), - ) - - return nil - } - - // returns if the account has no vesting coins of the bond denom - vestingCoinToClawback := sdk.Coin{} - if vc := vestAccount.GetVestingCoins(ctx.BlockTime()); !vc.Empty() { - _, vestingCoinToClawback = vc.Find(sk.BondDenom(ctx)) - } - - if vestingCoinToClawback.IsNil() { - ctx.Logger().Info( - "%s: %s", - "no vesting coins to migrate", - "Finished migrating vesting funds", - ) - - return nil - } - - // unbond all delegations from vesting account - if err := forceUnbondAllDelegations(sk, bk, ctx, address); err != nil { - return err - } - - // transfers still vesting tokens of BondDenom to community pool - if err := forceFundCommunityPool( - ak, - dk, - bk, - ctx, - vestingCoinToClawback, - address, - keepers.GetKey(banktypes.StoreKey), - ); err != nil { - return err - } - - // overwrite vesting account using its embedded base account - ak.SetAccount(ctx, vestAccount.BaseAccount) - - // validate account balance - if err := bk.ValidateBalance(ctx, address); err != nil { - return err - } - - ctx.Logger().Info("Finished migrating vesting funds") - return nil -} - -// forceUnbondAllDelegations unbonds all the delegations from the given account address, -// without waiting for an unbonding period -func forceUnbondAllDelegations( - sk stakingkeeper.Keeper, - bk bankkeeper.Keeper, - ctx sdk.Context, - delegator sdk.AccAddress, -) error { - dels := sk.GetDelegatorDelegations(ctx, delegator, 100) - - for _, del := range dels { - valAddr := del.GetValidatorAddr() - - validator, found := sk.GetValidator(ctx, valAddr) - if !found { - return stakingtypes.ErrNoValidatorFound - } - - returnAmount, err := sk.Unbond(ctx, delegator, valAddr, del.GetShares()) - if err != nil { - return err - } - - coins := sdk.NewCoins(sdk.NewCoin(sk.BondDenom(ctx), returnAmount)) - - // transfer the validator tokens to the not bonded pool - if validator.IsBonded() { - // doing stakingKeeper.bondedTokensToNotBonded - err = bk.SendCoinsFromModuleToModule(ctx, stakingtypes.BondedPoolName, stakingtypes.NotBondedPoolName, coins) - if err != nil { - return err - } - } - - err = bk.UndelegateCoinsFromModuleToAccount(ctx, stakingtypes.NotBondedPoolName, delegator, coins) - if err != nil { - return err - } - } - - return nil -} - -// forceFundCommunityPool sends the given coin from the sender account to the community pool -// even if the coin is locked. 
-// Note that it partially follows the logic of the FundCommunityPool method in -// https://github.com/cosmos/cosmos-sdk/blob/release%2Fv0.47.x/x/distribution/keeper/keeper.go#L155 -func forceFundCommunityPool( - ak accountkeeper.AccountKeeper, - dk distributionkeeper.Keeper, - bk bankkeeper.Keeper, - ctx sdk.Context, - amount sdk.Coin, - sender sdk.AccAddress, - bs storetypes.StoreKey, -) error { - recipientAcc := ak.GetModuleAccount(ctx, distributiontypes.ModuleName) - if recipientAcc == nil { - return fmt.Errorf("%s:%s", sdkerrors.ErrUnknownAddress, distributiontypes.ModuleName) - } - - senderBal := bk.GetBalance(ctx, sender, amount.Denom) - if _, hasNeg := sdk.NewCoins(senderBal).SafeSub(amount); hasNeg { - return fmt.Errorf( - "%s: spendable balance %s is smaller than %s", - sdkerrors.ErrInsufficientFunds, - senderBal, - amount, - ) - } - if err := setBalance(ctx, sender, senderBal.Sub(amount), bs); err != nil { - return err - } - recipientBal := bk.GetBalance(ctx, recipientAcc.GetAddress(), amount.Denom) - if err := setBalance(ctx, recipientAcc.GetAddress(), recipientBal.Add(amount), bs); err != nil { - return err - } - - accExists := ak.HasAccount(ctx, recipientAcc.GetAddress()) - if !accExists { - ak.SetAccount(ctx, ak.NewAccountWithAddress(ctx, recipientAcc.GetAddress())) - } - - feePool := dk.GetFeePool(ctx) - feePool.CommunityPool = feePool.CommunityPool.Add(sdk.NewDecCoinsFromCoins(amount)...) - dk.SetFeePool(ctx, feePool) - - return nil -} - -// setBalance sets the coin balance for an account by address. -// Note that it follows the same logic of the setBalance method in -// https://github.com/cosmos/cosmos-sdk/blob/v0.47.7/x/bank/keeper/send.go#L337 -func setBalance( - ctx sdk.Context, - addr sdk.AccAddress, - balance sdk.Coin, - bs storetypes.StoreKey, -) error { - if !balance.IsValid() { - return fmt.Errorf("%s:%s", sdkerrors.ErrInvalidCoins, balance.String()) - } - - store := ctx.KVStore(bs) - accountStore := prefix.NewStore(store, banktypes.CreateAccountBalancesPrefix(addr)) - denomPrefixStore := prefix.NewStore(store, banktypes.CreateDenomAddressPrefix(balance.Denom)) - - if balance.IsZero() { - accountStore.Delete([]byte(balance.Denom)) - denomPrefixStore.Delete(address.MustLengthPrefix(addr)) - } else { - amount, err := balance.Amount.Marshal() - if err != nil { - return err - } - - accountStore.Set([]byte(balance.Denom), amount) - - // Store a reverse index from denomination to account address with a - // sentinel value. - denomAddrKey := address.MustLengthPrefix(addr) - if !denomPrefixStore.Has(denomAddrKey) { - denomPrefixStore.Set(denomAddrKey, []byte{0}) - } - } - - return nil -} - -// SetMinInitialDepositRatio sets the MinInitialDepositRatio param of the gov -// module to 10% - this is the proportion of the deposit value that must be paid -// at proposal submission. -func SetMinInitialDepositRatio(ctx sdk.Context, gk govkeeper.Keeper) error { - ctx.Logger().Info("Initializing MinInitialDepositRatio...") - - params := gk.GetParams(ctx) - params.MinInitialDepositRatio = sdk.NewDecWithPrec(1, 1).String() // 0.1 (10%) - err := gk.SetParams(ctx, params) - if err != nil { - return err - } - - ctx.Logger().Info("Finished initializing MinInitialDepositRatio...") - - return nil -} - -/* -The following is a list of the discrepancies that were found in the IBC transfer escrow accounts. -Please note that discrepancies #1 and #3 are for the same escrow account address, but for coins of -a different denomination. 
- -Discrepancy #1: -- Counterparty Chain ID: osmosis-1 -- Escrow Account Address: cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf -- Asset Base Denom: FX -- Asset IBC Denom: ibc/4925E6ABA571A44D2BE0286D2D29AF42A294D0FF2BB16490149A1B26EAD33729 -- Escrow Balance: 8859960534331100342 -- Counterparty Total Supply: 8899960534331100342ibc/EBBE6553941A1F0111A9163F885F7665417467FB630D68F5D4F15425C1E64FDE -- Missing amount in Escrow Account: 40000000000000000 - -Discrepancy #2: -- Counterparty Chain ID: juno-1 -- Escrow Account Address: cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge -- Asset Base Denom: uosmo -- Asset IBC Denom: ibc/14F9BC3E44B8A9C1BE1FB08980FAB87034C9905EF17CF2F5008FC085218811CC -- Escrow Balance: 6247328 -- Counterparty Total Supply: 6249328ibc/A065D610A42C3943FAB23979A4F969291A2CF9FE76966B8960AC34B52EFA9F62 -- Missing amount in Escrow Account: 2000 - -Discrepancy #3: -- Counterparty Chain ID: osmosis-1 -- Escrow Account Address: cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf -- Asset Base Denom: rowan -- Asset IBC Denom: ibc/F5ED5F3DC6F0EF73FA455337C027FE91ABCB375116BF51A228E44C493E020A09 -- Escrow Balance: 122394170815718341733868 -- Counterparty Total Supply: 126782170815718341733868ibc/92E49910206805D48FC035A947F38ABFD5F0372F254846D9873442F3036E20AF -- Missing amount in Escrow Account: 4388000000000000000000 -*/ - -// UpgradeEscrowAccounts mints the necessary assets to reach parity between the escrow account -// and the counterparty total supply, and then, send them from the transfer module to the escrow account. -func UpgradeEscrowAccounts(ctx sdk.Context, bankKeeper bankkeeper.Keeper, transferKeeper ibctransferkeeper.Keeper) { - for _, update := range GetEscrowUpdates(ctx) { - escrowAddress := sdk.MustAccAddressFromBech32(update.Address) - for _, coin := range update.Coins { - coins := sdk.NewCoins(coin) - - if err := bankKeeper.MintCoins(ctx, ibctransfertypes.ModuleName, coins); err != nil { - ctx.Logger().Error("fail to upgrade escrow account: %s", err) - } - - if err := bankKeeper.SendCoinsFromModuleToAccount(ctx, ibctransfertypes.ModuleName, escrowAddress, coins); err != nil { - ctx.Logger().Error("fail to upgrade escrow account: %s", err) - } - - // update the transfer module's store for the total escrow amounts - currentTotalEscrow := transferKeeper.GetTotalEscrowForDenom(ctx, coin.GetDenom()) - newTotalEscrow := currentTotalEscrow.Add(coin) - transferKeeper.SetTotalEscrowForDenom(ctx, newTotalEscrow) - } - } -} - -type UpdateCoins struct { - Address string - Coins sdk.Coins -} - -func GetEscrowUpdates(ctx sdk.Context) []UpdateCoins { - escrowUpdates := []UpdateCoins{ - { - // discrepancy #1 - Address: "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", - Coins: sdk.Coins{{ - Denom: "ibc/4925E6ABA571A44D2BE0286D2D29AF42A294D0FF2BB16490149A1B26EAD33729", - Amount: sdk.NewInt(40000000000000000), - }}, - }, - { - // discrepancy #2 - Address: "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", - Coins: sdk.Coins{{ - Denom: "ibc/14F9BC3E44B8A9C1BE1FB08980FAB87034C9905EF17CF2F5008FC085218811CC", - Amount: sdk.NewInt(2000), - }}, - }, - } - - // For discrepancy #3, the missing amount in the escrow account is too large - // to be represented using an 64-bit integer. Therefore, it's added to the - // escrow updates list under the condition that the amount is successfully - // converted to the sdk.Int type. 
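To make the comment above concrete: `sdk.NewInt` takes an `int64`, and the discrepancy #3 amount (about 4.39e21) is roughly 475 times larger than `math.MaxInt64` (about 9.22e18), so the deleted code parses it with `sdk.NewIntFromString`, which is backed by an arbitrary-precision `big.Int`. A standalone sketch:

```go
package main

import (
	"fmt"
	"math"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	// The largest value an int64 (and hence sdk.NewInt) can represent.
	var maxI64 int64 = math.MaxInt64
	fmt.Println(maxI64) // 9223372036854775807

	// The discrepancy #3 amount overflows int64, so it must be parsed
	// into an sdk.Int; the second return value reports parse success.
	amt, ok := sdk.NewIntFromString("4388000000000000000000")
	fmt.Println(amt, ok) // 4388000000000000000000 true
}
```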
- if amt, ok := sdk.NewIntFromString("4388000000000000000000"); !ok { - ctx.Logger().Error("can't upgrade missing amount in escrow account: '4388000000000000000000'") - } else { - coins := escrowUpdates[0].Coins - coins = coins.Add(sdk.NewCoins(sdk.NewCoin( - "ibc/F5ED5F3DC6F0EF73FA455337C027FE91ABCB375116BF51A228E44C493E020A09", - amt, - ))...) - escrowUpdates[0].Coins = coins - } - - return escrowUpdates -} diff --git a/app/upgrades/v15/upgrades_test.go b/app/upgrades/v15/upgrades_test.go deleted file mode 100644 index 3f1aff00..00000000 --- a/app/upgrades/v15/upgrades_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package v15_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - abci "github.com/cometbft/cometbft/abci/types" - tmrand "github.com/cometbft/cometbft/libs/rand" - tmproto "github.com/cometbft/cometbft/proto/tendermint/types" - tmtime "github.com/cometbft/cometbft/types/time" - - "cosmossdk.io/math" - - "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" - "github.com/cosmos/cosmos-sdk/testutil/mock" - sdk "github.com/cosmos/cosmos-sdk/types" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" - banktestutil "github.com/cosmos/cosmos-sdk/x/bank/testutil" - slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - - "github.com/cosmos/gaia/v15/app/helpers" - v15 "github.com/cosmos/gaia/v15/app/upgrades/v15" -) - -func TestUpgradeSigningInfos(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - slashingKeeper := gaiaApp.SlashingKeeper - - signingInfosNum := 8 - emptyAddrSigningInfo := make(map[string]struct{}) - - // create some dummy signing infos, half of which with an empty address field - for i := 0; i < signingInfosNum; i++ { - pubKey, err := mock.NewPV().GetPubKey() - require.NoError(t, err) - - consAddr := sdk.ConsAddress(pubKey.Address()) - info := slashingtypes.NewValidatorSigningInfo( - consAddr, - 0, - 0, - time.Unix(0, 0), - false, - 0, - ) - - if i < signingInfosNum/2 { - info.Address = "" - emptyAddrSigningInfo[consAddr.String()] = struct{}{} - } - - slashingKeeper.SetValidatorSigningInfo(ctx, consAddr, info) - require.NoError(t, err) - } - - require.Equal(t, signingInfosNum/2, len(emptyAddrSigningInfo)) - - // check that signing info are correctly set before migration - slashingKeeper.IterateValidatorSigningInfos(ctx, func(address sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { - if _, ok := emptyAddrSigningInfo[address.String()]; ok { - require.Empty(t, info.Address) - } else { - require.NotEmpty(t, info.Address) - } - - return false - }) - - // upgrade signing infos - v15.UpgradeSigningInfos(ctx, slashingKeeper) - - // check that all signing info are updated as expected after migration - slashingKeeper.IterateValidatorSigningInfos(ctx, func(address sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { - require.NotEmpty(t, info.Address) - - return false - }) -} - -func TestUpgradeMinCommissionRate(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - - // set min commission rate to 0 - stakingParams := gaiaApp.StakingKeeper.GetParams(ctx) - stakingParams.MinCommissionRate = sdk.ZeroDec() - err := gaiaApp.StakingKeeper.SetParams(ctx, stakingParams) - require.NoError(t, err) - - stakingKeeper := gaiaApp.StakingKeeper - valNum := 
len(stakingKeeper.GetAllValidators(ctx)) - - // create 3 new validators - for i := 0; i < 3; i++ { - pk := ed25519.GenPrivKeyFromSecret([]byte{uint8(i)}).PubKey() - val, err := stakingtypes.NewValidator( - sdk.ValAddress(pk.Address()), - pk, - stakingtypes.Description{}, - ) - require.NoError(t, err) - // set random commission rate - val.Commission.CommissionRates.Rate = sdk.NewDecWithPrec(tmrand.Int63n(100), 2) - stakingKeeper.SetValidator(ctx, val) - valNum++ - } - - validators := stakingKeeper.GetAllValidators(ctx) - require.Equal(t, valNum, len(validators)) - - // pre-test min commission rate is 0 - require.Equal(t, stakingKeeper.GetParams(ctx).MinCommissionRate, sdk.ZeroDec(), "non-zero previous min commission rate") - - // run the test and confirm the values have been updated - require.NoError(t, v15.UpgradeMinCommissionRate(ctx, *stakingKeeper)) - - newStakingParams := stakingKeeper.GetParams(ctx) - require.NotEqual(t, newStakingParams.MinCommissionRate, sdk.ZeroDec(), "failed to update min commission rate") - require.Equal(t, newStakingParams.MinCommissionRate, sdk.NewDecWithPrec(5, 2), "failed to update min commission rate") - - for _, val := range stakingKeeper.GetAllValidators(ctx) { - require.True(t, val.Commission.CommissionRates.Rate.GTE(newStakingParams.MinCommissionRate), "failed to update update commission rate for validator %s", val.GetOperator()) - } -} - -func TestClawbackVestingFunds(t *testing.T) { - gaiaApp := helpers.Setup(t) - - now := tmtime.Now() - endTime := now.Add(24 * time.Hour) - - bankKeeper := gaiaApp.BankKeeper - accountKeeper := gaiaApp.AccountKeeper - distrKeeper := gaiaApp.DistrKeeper - stakingKeeper := gaiaApp.StakingKeeper - - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{Height: 1}) - ctx = ctx.WithBlockHeader(tmproto.Header{Height: ctx.BlockHeight(), Time: now}) - - validator := stakingKeeper.GetAllValidators(ctx)[0] - bondDenom := stakingKeeper.GetParams(ctx).BondDenom - - // create continuous vesting account - origCoins := sdk.NewCoins(sdk.NewInt64Coin(bondDenom, 100)) - addr := sdk.AccAddress([]byte("cosmos145hytrc49m0hn6fphp8d5h4xspwkawcuzmx498")) - - vestingAccount := vesting.NewContinuousVestingAccount( - authtypes.NewBaseAccountWithAddress(addr), - origCoins, - now.Unix(), - endTime.Unix(), - ) - - require.True(t, vestingAccount.GetVestingCoins(now).IsEqual(origCoins)) - - accountKeeper.SetAccount(ctx, vestingAccount) - - // check vesting account balance was set correctly - require.NoError(t, bankKeeper.ValidateBalance(ctx, addr)) - require.Empty(t, bankKeeper.GetAllBalances(ctx, addr)) - - // send original vesting coin amount - require.NoError(t, banktestutil.FundAccount(bankKeeper, ctx, addr, origCoins)) - require.True(t, origCoins.IsEqual(bankKeeper.GetAllBalances(ctx, addr))) - - initBal := bankKeeper.GetAllBalances(ctx, vestingAccount.GetAddress()) - require.True(t, initBal.IsEqual(origCoins)) - - // save validator tokens - oldValTokens := validator.Tokens - - // delegate all vesting account tokens - _, err := stakingKeeper.Delegate( - ctx, - vestingAccount.GetAddress(), - origCoins.AmountOf(bondDenom), - stakingtypes.Unbonded, - validator, - true) - require.NoError(t, err) - - // check that the validator's tokens and shares increased - validator = stakingKeeper.GetAllValidators(ctx)[0] - del, found := stakingKeeper.GetDelegation(ctx, addr, validator.GetOperator()) - require.True(t, found) - require.True(t, validator.Tokens.Equal(oldValTokens.Add(origCoins.AmountOf(bondDenom)))) - require.Equal( - t, - 
validator.TokensFromShares(del.Shares), - math.LegacyNewDec(origCoins.AmountOf(bondDenom).Int64()), - ) - - // check vesting account delegations - vestingAccount = accountKeeper.GetAccount(ctx, addr).(*vesting.ContinuousVestingAccount) - require.Equal(t, vestingAccount.GetDelegatedVesting(), origCoins) - require.Empty(t, vestingAccount.GetDelegatedFree()) - - // check that migration succeeds when all coins are already vested - require.NoError(t, v15.ClawbackVestingFunds(ctx.WithBlockTime(endTime), addr, &gaiaApp.AppKeepers)) - - // vest half of the tokens - ctx = ctx.WithBlockTime(now.Add(12 * time.Hour)) - - currVestingCoins := vestingAccount.GetVestingCoins(ctx.BlockTime()) - currVestedCoins := vestingAccount.GetVestedCoins(ctx.BlockTime()) - - require.True(t, currVestingCoins.IsEqual(origCoins.QuoInt(math.NewInt(2)))) - require.True(t, currVestedCoins.IsEqual(origCoins.QuoInt(math.NewInt(2)))) - - // execute migration script - require.NoError(t, v15.ClawbackVestingFunds(ctx, addr, &gaiaApp.AppKeepers)) - - // check that the validator's delegation is removed and that - // their total tokens decreased - validator = stakingKeeper.GetAllValidators(ctx)[0] - _, found = stakingKeeper.GetDelegation(ctx, addr, validator.GetOperator()) - require.False(t, found) - require.Equal( - t, - validator.TokensFromShares(validator.DelegatorShares), - math.LegacyNewDec(oldValTokens.Int64()), - ) - - // verify that all modules can end/begin blocks - gaiaApp.EndBlock(abci.RequestEndBlock{}) - gaiaApp.BeginBlock( - abci.RequestBeginBlock{ - Header: tmproto.Header{ - ChainID: ctx.ChainID(), - Height: ctx.BlockHeight() + 1, - }, - }, - ) - - // check that the resulting account is of BaseAccount type now - account, ok := accountKeeper.GetAccount(ctx, addr).(*authtypes.BaseAccount) - require.True(t, ok) - // check that the account values are still the same - require.EqualValues(t, account, vestingAccount.BaseAccount) - - // check that the account's balance still has the vested tokens - require.True(t, bankKeeper.GetAllBalances(ctx, addr).IsEqual(currVestedCoins)) - // check that the community pool balance received the vesting tokens - require.True( - t, - distrKeeper.GetFeePoolCommunityCoins(ctx). 
- IsEqual(sdk.NewDecCoinsFromCoins(currVestingCoins...)), - ) - - // verify that normal operations work in banking and staking - _, err = stakingKeeper.Delegate( - ctx, addr, - sdk.NewInt(30), - stakingtypes.Unbonded, - validator, - true) - require.NoError(t, err) - - newAddr := sdk.AccAddress([]byte("cosmos1qqp9myctmh8mh2y7gynlsnw4y2wz3s3089dak6")) - err = bankKeeper.SendCoins( - ctx, - addr, - newAddr, - sdk.NewCoins(sdk.NewCoin(bondDenom, sdk.NewInt(10))), - ) - require.NoError(t, err) -} - -func TestSetMinInitialDepositRatio(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - - err := v15.SetMinInitialDepositRatio(ctx, *gaiaApp.GovKeeper) - require.NoError(t, err) - - minInitialDepositRatioStr := gaiaApp.GovKeeper.GetParams(ctx).MinInitialDepositRatio - minInitialDepositRatio, err := math.LegacyNewDecFromStr(minInitialDepositRatioStr) - require.NoError(t, err) - require.True(t, minInitialDepositRatio.Equal(sdk.NewDecWithPrec(1, 1))) -} - -func TestUpgradeEscrowAccounts(t *testing.T) { - gaiaApp := helpers.Setup(t) - ctx := gaiaApp.NewUncachedContext(true, tmproto.Header{}) - - bankKeeper := gaiaApp.BankKeeper - transferKeeper := gaiaApp.TransferKeeper - - escrowUpdates := v15.GetEscrowUpdates(ctx) - - // check escrow accounts are empty - for _, update := range escrowUpdates { - require.Empty(t, bankKeeper.GetAllBalances(ctx, sdk.MustAccAddressFromBech32(update.Address))) - for _, coin := range update.Coins { - require.Equal(t, sdk.ZeroInt(), transferKeeper.GetTotalEscrowForDenom(ctx, coin.Denom).Amount) - } - } - - // execute the upgrade - v15.UpgradeEscrowAccounts(ctx, bankKeeper, transferKeeper) - - // check that new assets are minted and transferred to the escrow accounts - numUpdate := 0 - for _, update := range escrowUpdates { - for _, coin := range update.Coins { - require.Equal(t, coin, bankKeeper.GetBalance(ctx, sdk.MustAccAddressFromBech32(update.Address), coin.Denom)) - // check that the total escrow amount for the denom is updated - require.Equal(t, coin, transferKeeper.GetTotalEscrowForDenom(ctx, coin.Denom)) - numUpdate++ - } - } - - // verify that all three discrepancies are covered in the update - require.Equal(t, 3, numUpdate) -} diff --git a/app/upgrades/v7/constants.go b/app/upgrades/v7/constants.go deleted file mode 100644 index 539fb26e..00000000 --- a/app/upgrades/v7/constants.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build upgrade_v7 - -package v7 - -import ( - store "github.com/cosmos/cosmos-sdk/store/types" - icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types" - - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name.
- UpgradeName = "v7-Theta" - - // allowed msg types of ica host - authzMsgExec = "/cosmos.authz.v1beta1.MsgExec" - authzMsgGrant = "/cosmos.authz.v1beta1.MsgGrant" - authzMsgRevoke = "/cosmos.authz.v1beta1.MsgRevoke" - bankMsgSend = "/cosmos.bank.v1beta1.MsgSend" - bankMsgMultiSend = "/cosmos.bank.v1beta1.MsgMultiSend" - distrMsgSetWithdrawAddr = "/cosmos.distribution.v1beta1.MsgSetWithdrawAddress" - distrMsgWithdrawValidatorCommission = "/cosmos.distribution.v1beta1.MsgWithdrawValidatorCommission" - distrMsgFundCommunityPool = "/cosmos.distribution.v1beta1.MsgFundCommunityPool" - distrMsgWithdrawDelegatorReward = "/cosmos.distribution.v1beta1.MsgWithdrawDelegatorReward" - feegrantMsgGrantAllowance = "/cosmos.feegrant.v1beta1.MsgGrantAllowance" - feegrantMsgRevokeAllowance = "/cosmos.feegrant.v1beta1.MsgRevokeAllowance" - govMsgVoteWeighted = "/cosmos.gov.v1beta1.MsgVoteWeighted" - govMsgSubmitProposal = "/cosmos.gov.v1beta1.MsgSubmitProposal" - govMsgDeposit = "/cosmos.gov.v1beta1.MsgDeposit" - govMsgVote = "/cosmos.gov.v1beta1.MsgVote" - stakingMsgEditValidator = "/cosmos.staking.v1beta1.MsgEditValidator" - stakingMsgDelegate = "/cosmos.staking.v1beta1.MsgDelegate" - stakingMsgUndelegate = "/cosmos.staking.v1beta1.MsgUndelegate" - stakingMsgBeginRedelegate = "/cosmos.staking.v1beta1.MsgBeginRedelegate" - stakingMsgCreateValidator = "/cosmos.staking.v1beta1.MsgCreateValidator" - vestingMsgCreateVestingAccount = "/cosmos.vesting.v1beta1.MsgCreateVestingAccount" - ibcMsgTransfer = "/ibc.applications.transfer.v1.MsgTransfer" - liquidityMsgSwapWithinBatch = "/tendermint.liquidity.v1beta1.MsgSwapWithinBatch" //#nosec G101 -- This is a false positive - liquidityMsgCreatePool = "/tendermint.liquidity.v1beta1.MsgCreatePool" - liquidityMsgDepositWithinBatch = "/tendermint.liquidity.v1beta1.MsgDepositWithinBatch" - liquidityMsgWithdrawWithinBatch = "/tendermint.liquidity.v1beta1.MsgWithdrawWithinBatch" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, - StoreUpgrades: store.StoreUpgrades{ - Added: []string{icahosttypes.StoreKey}, - }, -} diff --git a/app/upgrades/v7/upgrades.go b/app/upgrades/v7/upgrades.go deleted file mode 100644 index 161c5809..00000000 --- a/app/upgrades/v7/upgrades.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build upgrade_v7 - -package v7 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - ica "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts" - icacontrollertypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/controller/types" - icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types" - icatypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - _ *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - vm[icatypes.ModuleName] = mm.Modules[icatypes.ModuleName].ConsensusVersion() - // create ICS27 Controller submodule params - controllerParams := icacontrollertypes.Params{} - // create ICS27 Host submodule params - hostParams := icahosttypes.Params{ - HostEnabled: true, - AllowMessages: []string{ - authzMsgExec, - authzMsgGrant, - authzMsgRevoke, - bankMsgSend, - bankMsgMultiSend, - 
distrMsgSetWithdrawAddr, - distrMsgWithdrawValidatorCommission, - distrMsgFundCommunityPool, - distrMsgWithdrawDelegatorReward, - feegrantMsgGrantAllowance, - feegrantMsgRevokeAllowance, - govMsgVoteWeighted, - govMsgSubmitProposal, - govMsgDeposit, - govMsgVote, - stakingMsgEditValidator, - stakingMsgDelegate, - stakingMsgUndelegate, - stakingMsgBeginRedelegate, - stakingMsgCreateValidator, - vestingMsgCreateVestingAccount, - ibcMsgTransfer, - liquidityMsgCreatePool, - liquidityMsgSwapWithinBatch, - liquidityMsgDepositWithinBatch, - liquidityMsgWithdrawWithinBatch, - }, - } - - ctx.Logger().Info("start to init interchainaccount module...") - - // initialize ICS27 module - icaModule, correctTypecast := mm.Modules[icatypes.ModuleName].(ica.AppModule) - if !correctTypecast { - panic("mm.Modules[icatypes.ModuleName] is not of type ica.AppModule") - } - icaModule.InitModule(ctx, controllerParams, hostParams) - - ctx.Logger().Info("start to run module migrations...") - - return mm.RunMigrations(ctx, configurator, vm) - } -} diff --git a/app/upgrades/v8/constants.go b/app/upgrades/v8/constants.go deleted file mode 100644 index a5507ed8..00000000 --- a/app/upgrades/v8/constants.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build upgrade_v8 - -package v8 - -import ( - store "github.com/cosmos/cosmos-sdk/store/types" - - "github.com/cosmos/gaia/v15/app/upgrades" - "github.com/cosmos/gaia/v15/x/globalfee" -) - -const ( - // UpgradeName defines the on-chain upgrade name. - UpgradeName = "v8-Rho" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, - StoreUpgrades: store.StoreUpgrades{ - Added: []string{ - globalfee.ModuleName, - }, - }, -} diff --git a/app/upgrades/v8/upgrades.go b/app/upgrades/v8/upgrades.go deleted file mode 100644 index f65a97c7..00000000 --- a/app/upgrades/v8/upgrades.go +++ /dev/null @@ -1,140 +0,0 @@ -//go:build upgrade_v8 - -package v8 - -import ( - "errors" - "fmt" - - "github.com/cosmos/cosmos-sdk/store/prefix" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types" - icatypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/types" - ibcchanneltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func FixBankMetadata(ctx sdk.Context, keepers *keepers.AppKeepers) error { - ctx.Logger().Info("Starting fix bank metadata...") - - malformedDenom := "uatomu" - correctDenom := "uatom" - - atomMetaData, foundMalformed := keepers.BankKeeper.GetDenomMetaData(ctx, malformedDenom) - if foundMalformed { - // save it with the correct denom - keepers.BankKeeper.SetDenomMetaData(ctx, atomMetaData) - - // delete the old format - key := keepers.GetKey(banktypes.ModuleName) - store := ctx.KVStore(key) - oldDenomMetaDataStore := prefix.NewStore(store, banktypes.DenomMetadataPrefix) - oldDenomMetaDataStore.Delete([]byte(malformedDenom)) - - // confirm whether the old key is still accessible - _, foundMalformed = keepers.BankKeeper.GetDenomMetaData(ctx, malformedDenom) - if foundMalformed { - return errors.New("malformed 'uatomu' denom not fixed") - } - } - - // proceed with the original intention of populating the missing Name and Symbol fields - atomMetaData, foundCorrect := keepers.BankKeeper.GetDenomMetaData(ctx, 
correctDenom) - if !foundCorrect { - return errors.New("atom denom not found") - } - - atomMetaData.Name = "Cosmos Hub Atom" - atomMetaData.Symbol = "ATOM" - keepers.BankKeeper.SetDenomMetaData(ctx, atomMetaData) - - ctx.Logger().Info("Fix bank metadata complete") - - return nil -} - -func QuicksilverFix(ctx sdk.Context, keepers *keepers.AppKeepers) error { - ctx.Logger().Info("Starting fix quicksilver...") - - // Refund stuck coins from ica address - sourceAddress, err := sdk.AccAddressFromBech32("cosmos13dqvh4qtg4gzczuktgnw8gc2ewnwmhdwnctekxctyr4azz4dcyysecgq7e") - if err != nil { - return errors.New("invalid source address") - } - destinationAddress, err := sdk.AccAddressFromBech32("cosmos1jc24kwznud9m3mwqmcz3xw33ndjuufnghstaag") - if err != nil { - return errors.New("invalid destination address") - } - - // Get balance from stuck address and subtract 1 uatom sent by bad actor - sourceBalance := keepers.BankKeeper.GetBalance(ctx, sourceAddress, "uatom") - if sourceBalance.IsGTE(sdk.NewCoin("uatom", sdk.NewInt(1))) { - refundBalance := sourceBalance.SubAmount(sdk.NewInt(1)) - err = keepers.BankKeeper.SendCoins(ctx, sourceAddress, destinationAddress, sdk.NewCoins(refundBalance)) - if err != nil { - return errors.New("unable to refund coins") - } - } - - // Close channels - closeChannel(keepers, ctx, "channel-462") - closeChannel(keepers, ctx, "channel-463") - closeChannel(keepers, ctx, "channel-464") - closeChannel(keepers, ctx, "channel-465") - closeChannel(keepers, ctx, "channel-466") - - ctx.Logger().Info("Fix quicksilver complete") - - return nil -} - -func closeChannel(keepers *keepers.AppKeepers, ctx sdk.Context, channelID string) { - channel, found := keepers.IBCKeeper.ChannelKeeper.GetChannel(ctx, icatypes.HostPortID, channelID) - if found { - channel.State = ibcchanneltypes.CLOSED - keepers.IBCKeeper.ChannelKeeper.SetChannel(ctx, icatypes.HostPortID, channelID, channel) - } -} - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - keepers *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Running upgrade fixes...") - - err := FixBankMetadata(ctx, keepers) - if err != nil { - ctx.Logger().Info(fmt.Sprintf("error fixing bank metadata: %s", err.Error())) - } - - err = QuicksilverFix(ctx, keepers) - if err != nil { - return vm, err - } - - // Change hostParams allow_messages = [*] instead of whitelisting individual messages - hostParams := icahosttypes.Params{ - HostEnabled: true, - AllowMessages: []string{"*"}, - } - - // Update params for host & controller keepers - keepers.ICAHostKeeper.SetParams(ctx, hostParams) - - ctx.Logger().Info("Starting module migrations...") - - vm, err = mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/app/upgrades/v9/constants.go b/app/upgrades/v9/constants.go deleted file mode 100644 index d0b8c792..00000000 --- a/app/upgrades/v9/constants.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build upgrade_v9 - -package v9 - -import ( - store "github.com/cosmos/cosmos-sdk/store/types" - ccvprovider "github.com/cosmos/interchain-security/v3/x/ccv/provider/types" - - store "github.com/cosmos/cosmos-sdk/store/types" - - "github.com/cosmos/gaia/v15/app/upgrades" -) - -const ( - // UpgradeName defines the on-chain upgrade name. 
- UpgradeName = "v9-Lambda" -) - -var Upgrade = upgrades.Upgrade{ - UpgradeName: UpgradeName, - CreateUpgradeHandler: CreateUpgradeHandler, - StoreUpgrades: store.StoreUpgrades{ - Added: []string{ - ccvprovider.ModuleName, - }, - }, -} diff --git a/app/upgrades/v9/upgrades.go b/app/upgrades/v9/upgrades.go deleted file mode 100644 index 0f26347f..00000000 --- a/app/upgrades/v9/upgrades.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build upgrade_v9 - -package v9 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/module" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/gaia/v15/app/keepers" -) - -func CreateUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - _ *keepers.AppKeepers, -) upgradetypes.UpgradeHandler { - return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx.Logger().Info("Starting module migrations...") - - vm, err := mm.RunMigrations(ctx, configurator, vm) - if err != nil { - return vm, err - } - - ctx.Logger().Info("Upgrade complete") - return vm, err - } -} diff --git a/client/docs/config.json b/client/docs/config.json index 8c396039..5a213ce7 100644 --- a/client/docs/config.json +++ b/client/docs/config.json @@ -1,18 +1,9 @@ { "swagger": "2.0", "info": { - "title": "Cosmoshub - gRPC Gateway docs", + "title": "AtomOne - gRPC Gateway docs", "description": "A REST interface for state queries", "version": "1.0.0" }, - "apis": [ - { - "url": "./tmp-swagger-gen/gaia/globalfee/v1beta1/query.swagger.json", - "operationIds": { - "rename": { - "Params": "GlobalfeeParams" - } - } - } - ] -} \ No newline at end of file + "apis": [] +} diff --git a/client/docs/swagger-ui/swagger.yaml b/client/docs/swagger-ui/swagger.yaml index 5515ee9b..21d0b534 100644 --- a/client/docs/swagger-ui/swagger.yaml +++ b/client/docs/swagger-ui/swagger.yaml @@ -1,121 +1,5 @@ swagger: '2.0' info: - title: Cosmoshub - gRPC Gateway docs + title: AtomOne - gRPC Gateway docs description: A REST interface for state queries version: 1.0.0 -paths: - /gaia/globalfee/v1beta1/minimum_gas_prices: - get: - operationId: MinimumGasPrices - responses: - '200': - description: A successful response. - schema: - type: object - properties: - minimum_gas_prices: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - description: |- - QueryMinimumGasPricesResponse is the response type for the - Query/MinimumGasPrices RPC method. - default: - description: An unexpected error response. - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query -definitions: - cosmos.base.v1beta1.DecCoin: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. 
- gaia.globalfee.v1beta1.QueryMinimumGasPricesResponse: - type: object - properties: - minimum_gas_prices: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: |- - QueryMinimumGasPricesResponse is the response type for the - Query/MinimumGasPrices RPC method. - google.protobuf.Any: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - grpc.gateway.runtime.Error: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte diff --git a/cmd/gaiad/cmd/bech32_convert.go b/cmd/atomoned/cmd/bech32_convert.go similarity index 77% rename from cmd/gaiad/cmd/bech32_convert.go rename to cmd/atomoned/cmd/bech32_convert.go index 283592a5..ba464594 100644 --- a/cmd/gaiad/cmd/bech32_convert.go +++ b/cmd/atomoned/cmd/bech32_convert.go @@ -5,7 +5,7 @@ import ( "github.com/spf13/cobra" - addressutil "github.com/cosmos/gaia/v15/pkg/address" + addressutil "github.com/atomone-hub/atomone/pkg/address" ) var flagBech32Prefix = "prefix" @@ -18,9 +18,9 @@ func AddBech32ConvertCommand() *cobra.Command { Long: `Convert any bech32 string to the cosmos prefix Example: - gaiad debug bech32-convert akash1a6zlyvpnksx8wr6wz8wemur2xe8zyh0ytz6d88 + atomoned debug bech32-convert akash1a6zlyvpnksx8wr6wz8wemur2xe8zyh0ytz6d88 - gaiad debug bech32-convert stride1673f0t8p893rqyqe420mgwwz92ac4qv6synvx2 --prefix osmo + atomoned debug bech32-convert stride1673f0t8p893rqyqe420mgwwz92ac4qv6synvx2 --prefix osmo `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -41,7 +41,7 @@ Example: }, } - cmd.Flags().StringP(flagBech32Prefix, "p", "cosmos", "Bech32 Prefix to encode to") + cmd.Flags().StringP(flagBech32Prefix, "p", "atone", "Bech32 Prefix to encode to") return cmd } diff --git a/cmd/gaiad/cmd/genaccounts.go b/cmd/atomoned/cmd/genaccounts.go similarity index 100% rename from cmd/gaiad/cmd/genaccounts.go rename to cmd/atomoned/cmd/genaccounts.go diff --git a/cmd/gaiad/cmd/root.go b/cmd/atomoned/cmd/root.go similarity index 90% rename from cmd/gaiad/cmd/root.go rename to cmd/atomoned/cmd/root.go index 0656cd75..475ef6a2 100644 --- a/cmd/gaiad/cmd/root.go +++ b/cmd/atomoned/cmd/root.go @@ -40,14 +40,14 @@ import ( "github.com/cosmos/cosmos-sdk/x/crisis" genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" - gaia "github.com/cosmos/gaia/v15/app" - "github.com/cosmos/gaia/v15/app/params" + atomone "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/app/params" ) // NewRootCmd creates a new root command for simd. It is called once in the // main function. func NewRootCmd() (*cobra.Command, params.EncodingConfig) { - encodingConfig := gaia.RegisterEncodingConfig() + encodingConfig := atomone.RegisterEncodingConfig() initClientCtx := client.Context{}. WithCodec(encodingConfig.Marshaler). WithInterfaceRegistry(encodingConfig.InterfaceRegistry). @@ -55,11 +55,11 @@ func NewRootCmd() (*cobra.Command, params.EncodingConfig) { WithLegacyAmino(encodingConfig.Amino). WithInput(os.Stdin). WithAccountRetriever(types.AccountRetriever{}). - WithHomeDir(gaia.DefaultNodeHome). 
+ WithHomeDir(atomone.DefaultNodeHome). WithViper("") rootCmd := &cobra.Command{ - Use: "gaiad", + Use: "atomoned", Short: "Stargate Cosmos Hub App", PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { cmd.SetOut(cmd.OutOrStdout()) @@ -130,16 +130,16 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { ac := appCreator{encodingConfig} rootCmd.AddCommand( - genutilcli.InitCmd(gaia.ModuleBasics, gaia.DefaultNodeHome), + genutilcli.InitCmd(atomone.ModuleBasics, atomone.DefaultNodeHome), tmcli.NewCompletionCmd(rootCmd, true), - NewTestnetCmd(gaia.ModuleBasics, banktypes.GenesisBalancesIterator{}), + NewTestnetCmd(atomone.ModuleBasics, banktypes.GenesisBalancesIterator{}), addDebugCommands(debug.Cmd()), config.Cmd(), pruning.PruningCmd(ac.newApp), snapshot.Cmd(ac.newApp), ) - server.AddCommands(rootCmd, gaia.DefaultNodeHome, ac.newApp, ac.appExport, addModuleInitFlags) + server.AddCommands(rootCmd, atomone.DefaultNodeHome, ac.newApp, ac.appExport, addModuleInitFlags) // add keybase, auxiliary RPC, query, and tx child commands rootCmd.AddCommand( @@ -147,7 +147,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { genesisCommand(encodingConfig), queryCommand(), txCommand(), - keys.Commands(gaia.DefaultNodeHome), + keys.Commands(atomone.DefaultNodeHome), ) // add rosetta @@ -160,7 +160,7 @@ func addModuleInitFlags(startCmd *cobra.Command) { // genesisCommand builds genesis-related `simd genesis` command. Users may provide application specific commands as a parameter func genesisCommand(encodingConfig params.EncodingConfig, cmds ...*cobra.Command) *cobra.Command { - cmd := genutilcli.GenesisCoreCommand(encodingConfig.TxConfig, gaia.ModuleBasics, gaia.DefaultNodeHome) + cmd := genutilcli.GenesisCoreCommand(encodingConfig.TxConfig, atomone.ModuleBasics, atomone.DefaultNodeHome) for _, subCmd := range cmds { cmd.AddCommand(subCmd) @@ -186,7 +186,7 @@ func queryCommand() *cobra.Command { authcmd.QueryTxCmd(), ) - gaia.ModuleBasics.AddQueryCommands(cmd) + atomone.ModuleBasics.AddQueryCommands(cmd) cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID") return cmd @@ -214,7 +214,7 @@ func txCommand() *cobra.Command { authcmd.GetAuxToFeeCommand(), ) - gaia.ModuleBasics.AddTxCommands(cmd) + atomone.ModuleBasics.AddTxCommands(cmd) cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID") return cmd @@ -287,7 +287,7 @@ func (a appCreator) newApp( baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))), } - return gaia.NewGaiaApp( + return atomone.NewAtomOneApp( logger, db, traceStore, @@ -310,7 +310,7 @@ func (a appCreator) appExport( appOpts servertypes.AppOptions, modulesToExport []string, ) (servertypes.ExportedApp, error) { - var gaiaApp *gaia.GaiaApp + var atomoneApp *atomone.AtomOneApp homePath, ok := appOpts.Get(flags.FlagHome).(string) if !ok || homePath == "" { @@ -331,7 +331,7 @@ func (a appCreator) appExport( loadLatest = true } - gaiaApp = gaia.NewGaiaApp( + atomoneApp = atomone.NewAtomOneApp( logger, db, traceStore, @@ -343,10 +343,10 @@ func (a appCreator) appExport( ) if height != -1 { - if err := gaiaApp.LoadHeight(height); err != nil { + if err := atomoneApp.LoadHeight(height); err != nil { return servertypes.ExportedApp{}, err } } - return gaiaApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport) + return atomoneApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport) } diff --git 
a/cmd/gaiad/cmd/root_test.go b/cmd/atomoned/cmd/root_test.go similarity index 81% rename from cmd/gaiad/cmd/root_test.go rename to cmd/atomoned/cmd/root_test.go index 52a61bc9..324b6305 100644 --- a/cmd/gaiad/cmd/root_test.go +++ b/cmd/atomoned/cmd/root_test.go @@ -7,8 +7,8 @@ import ( svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" - app "github.com/cosmos/gaia/v15/app" - "github.com/cosmos/gaia/v15/cmd/gaiad/cmd" + app "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/cmd/atomoned/cmd" ) func TestRootCmdConfig(t *testing.T) { diff --git a/cmd/gaiad/cmd/testnet.go b/cmd/atomoned/cmd/testnet.go similarity index 100% rename from cmd/gaiad/cmd/testnet.go rename to cmd/atomoned/cmd/testnet.go diff --git a/cmd/gaiad/main.go b/cmd/atomoned/main.go similarity index 79% rename from cmd/gaiad/main.go rename to cmd/atomoned/main.go index 06aa0743..b63a062f 100644 --- a/cmd/gaiad/main.go +++ b/cmd/atomoned/main.go @@ -6,8 +6,8 @@ import ( "github.com/cosmos/cosmos-sdk/server" svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" - app "github.com/cosmos/gaia/v15/app" - "github.com/cosmos/gaia/v15/cmd/gaiad/cmd" + app "github.com/atomone-hub/atomone/app" + "github.com/atomone-hub/atomone/cmd/atomoned/cmd" ) func main() { diff --git a/contrib/Dockerfile.test b/contrib/Dockerfile.test index 569fa14a..8836a0a8 100644 --- a/contrib/Dockerfile.test +++ b/contrib/Dockerfile.test @@ -1,14 +1,14 @@ # Simple usage with a mounted data directory: -# > docker build -t gaia . -# > docker run -it -p 46657:46657 -p 46656:46656 -v ~/.gaia:/root/.gaia gaia gaiad init -# > docker run -it -p 46657:46657 -p 46656:46656 -v ~/.gaia:/root/.gaia gaia gaiad start +# > docker build -t atomone . +# > docker run -it -p 46657:46657 -p 46656:46656 -v ~/.atomone:/root/.atomone atomone atomoned init +# > docker run -it -p 46657:46657 -p 46656:46656 -v ~/.atomone:/root/.atomone atomone atomoned start FROM golang:1.21-alpine AS build-env # Set up dependencies ENV PACKAGES curl make git libc-dev bash gcc linux-headers eudev-dev python3 # Set working directory for the build -WORKDIR /go/src/github.com/cosmos/gaia +WORKDIR /go/src/github.com/atomone-hub/atomone # Add source files COPY . . @@ -25,7 +25,7 @@ RUN apk add --update ca-certificates WORKDIR /root # Copy over binaries from the build-env -COPY --from=build-env /go/bin/gaiad /usr/bin/gaiad +COPY --from=build-env /go/bin/atomoned /usr/bin/atomoned COPY ./contrib/single-node.sh . 
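The `contrib/denom.json` diff that follows renames the Hub's atom metadata to AtomOne's ATONE units, and several test scripts further down splice this file verbatim into `app_state.bank.denom_metadata` of a test genesis. The bank module's metadata validation expects `display` to name one of the `denom_units` entries, so a quick `jq` check is a cheap guard against mismatches before the file reaches a genesis file. This is a sketch, not part of the repo, assuming `jq` is installed and the command is run from the repository root:

```sh
# Check that every metadata entry's "display" denom is defined among its
# denom_units; `jq -e` makes the exit status non-zero when the check fails,
# e.g. when display is "ATONE" but the exponent-6 unit is named "atone".
jq -e 'all(.[]; .display as $d | any(.denom_units[]; .denom == $d))' contrib/denom.json \
  && echo "denom metadata OK"
```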
diff --git a/contrib/denom.json b/contrib/denom.json
index 71c0a9df..be682bec 100644
--- a/contrib/denom.json
+++ b/contrib/denom.json
@@ -1,30 +1,30 @@
 [{
-    "base": "uatom",
+    "base": "uatone",
     "denom_units": [
         {
             "aliases": [
-                "microatom"
+                "microatone"
             ],
-            "denom": "uatom",
+            "denom": "uatone",
             "exponent": 0
         },
         {
             "aliases": [
-                "milliatom"
+                "milliatone"
             ],
-            "denom": "matom",
+            "denom": "matone",
             "exponent": 3
         },
         {
             "aliases": [],
-            "denom": "atom",
+            "denom": "atone",
             "exponent": 6
         }
     ],
-    "description": "The native staking token of the Cosmos Hub.",
-    "display": "atom",
-    "name": "asdf",
-    "symbol": "asdf"
-}]
\ No newline at end of file
+    "description": "The native staking token of AtomOne.",
+    "display": "atone",
+    "name": "ATONE",
+    "symbol": "ATONE"
+}]
diff --git a/contrib/generate_release_note/main.go b/contrib/generate_release_note/main.go
index 6bcdfedf..a12459b9 100644
--- a/contrib/generate_release_note/main.go
+++ b/contrib/generate_release_note/main.go
@@ -30,7 +30,7 @@ func main() {
 	}
 	note := strings.Builder{}
-	note.WriteString(fmt.Sprintf("# Gaia %s Release Notes\n", args[1]))
+	note.WriteString(fmt.Sprintf("# AtomOne %s Release Notes\n", args[1]))
 	note.WriteString(changelog)
 	note.WriteString("```\n")
 	note.Write(buildReport)
diff --git a/contrib/githooks/pre-commit b/contrib/githooks/pre-commit
index 99eca6eb..eab16280 100755
--- a/contrib/githooks/pre-commit
+++ b/contrib/githooks/pre-commit
@@ -31,7 +31,7 @@ if [[ $STAGED_GO_FILES != "" ]]; then
         gofmt -w -s $file
         misspell -w $file
-        goimports -w -local github.com/cosmos/gaia $file
+        goimports -w -local github.com/atomone-hub/atomone $file
         git add $file
     done
diff --git a/contrib/githooks/precommit b/contrib/githooks/precommit
index 99eca6eb..8a69ba9c 100644
--- a/contrib/githooks/precommit
+++ b/contrib/githooks/precommit
@@ -31,7 +31,7 @@ if [[ $STAGED_GO_FILES != "" ]]; then
         gofmt -w -s $file
         misspell -w $file
-        goimports -w -local github.com/cosmos/gaia $file
+        goimports -w -local github.com/atomone-hub/atomone $file
         git add $file
     done
diff --git a/contrib/scripts/local-gaia.sh b/contrib/scripts/local-atomone.sh
similarity index 71%
rename from contrib/scripts/local-gaia.sh
rename to contrib/scripts/local-atomone.sh
index c307b5c8..70beda33 100755
--- a/contrib/scripts/local-gaia.sh
+++ b/contrib/scripts/local-atomone.sh
@@ -22,11 +22,11 @@ PROV_KEY=${MONIKER}-key
 
 # Clean start
-pkill -f gaiad &> /dev/null || true
+pkill -f atomoned &> /dev/null || true
 rm -rf ${PROV_NODE_DIR}
 
 # Build file and node directory structure
-gaiad init $MONIKER --chain-id provider --home ${PROV_NODE_DIR}
+atomoned init $MONIKER --chain-id provider --home ${PROV_NODE_DIR}
 jq ".app_state.gov.voting_params.voting_period = \"20s\" | .app_state.staking.params.unbonding_time = \"86400s\"" \
 	${PROV_NODE_DIR}/config/genesis.json > \
 	${PROV_NODE_DIR}/edited_genesis.json && mv ${PROV_NODE_DIR}/edited_genesis.json ${PROV_NODE_DIR}/config/genesis.json
@@ -34,20 +34,20 @@ gaiad init $MONIKER --chain-id provider --home ${PROV_NODE_DIR}
 sleep 1
 
 # Create account keypair
-gaiad keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1
+atomoned keys add $PROV_KEY --home ${PROV_NODE_DIR} --keyring-backend test --output json > ${PROV_NODE_DIR}/${PROV_KEY}.json 2>&1
 sleep 1
 
 # Add stake to user
 PROV_ACCOUNT_ADDR=$(jq -r '.address' ${PROV_NODE_DIR}/${PROV_KEY}.json)
-gaiad add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test
+atomoned
add-genesis-account $PROV_ACCOUNT_ADDR $USER_COINS --home ${PROV_NODE_DIR} --keyring-backend test sleep 1 # Stake 1/1000 user's coins -gaiad gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER +atomoned gentx $PROV_KEY $STAKE --chain-id provider --home ${PROV_NODE_DIR} --keyring-backend test --moniker $MONIKER sleep 1 -gaiad collect-gentxs --home ${PROV_NODE_DIR} --gentx-dir ${PROV_NODE_DIR}/config/gentx/ +atomoned collect-gentxs --home ${PROV_NODE_DIR} --gentx-dir ${PROV_NODE_DIR}/config/gentx/ sleep 1 sed -i -r "/node =/ s/= .*/= \"tcp:\/\/${NODE_IP}:26658\"/" ${PROV_NODE_DIR}/config/client.toml @@ -55,8 +55,8 @@ sed -i -r 's/timeout_commit = "5s"/timeout_commit = "3s"/g' ${PROV_NODE_DIR}/con sed -i -r 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ${PROV_NODE_DIR}/config/config.toml -# Start gaia -gaiad start \ +# Start atomone +atomoned start \ --home ${PROV_NODE_DIR} \ --rpc.laddr tcp://${NODE_IP}:26658 \ --grpc.address ${NODE_IP}:9091 \ diff --git a/contrib/scripts/upgrade_test_scripts/run_gaia.sh b/contrib/scripts/upgrade_test_scripts/run_gaia.sh deleted file mode 100755 index 9859bb82..00000000 --- a/contrib/scripts/upgrade_test_scripts/run_gaia.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -# find the highest upgrade version number($UPGRADE_VERSION_NUMBER) within the 'app/upgrades' dir. -# the highest upgrade version is used to propose upgrade and create /cosmovisor/upgrades/$UPGRADE_VERSION/bin dir. -UPGRADES_DIR=$(realpath ./app/upgrades) -UPGRADE_VERSION_NUMBER=0 - -for dir in "$UPGRADES_DIR"/*; do - if [ -d "$dir" ]; then - DIR_NAME=$(basename "$dir") - VERSION_NUMBER="${DIR_NAME#v}" - if [ "$VERSION_NUMBER" -gt "$UPGRADE_VERSION_NUMBER" ]; then - UPGRADE_VERSION_NUMBER=$VERSION_NUMBER - fi - fi -done - -if [ -n "$UPGRADE_VERSION_NUMBER" ]; then - echo "Upgrade to version: $UPGRADE_VERSION_NUMBER" -else - echo "No upgrade version found in app/upgrades." -fi - -UPGRADE_VERSION=v$UPGRADE_VERSION_NUMBER -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! test -f "./build/gaiadold"; then - echo "old gaiad binary does not exist" - exit -fi - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiadold "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - -if ! test -f "./build/gaiadnew"; then - echo "new gaiad binary does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/$UPGRADE_VERSION/bin -cp ./build/gaiadnew "$NODE_HOME"/cosmovisor/upgrades/$UPGRADE_VERSION/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' 
contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json >"$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e '/total_liquid_staked_tokens/!s/stake/uatom/g' $NODE_HOME/config/genesis.json - -# min deposition amount (this one isn't working) -sed -i -e 's/"amount": "10000000",/"amount": "1",/g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's/"quorum": "0.334000000000000000",/"quorum": "0.000000000000000001",/g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's/"threshold": "0.500000000000000000",/"threshold": "0.000000000000000001",/g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's/"voting_period": "172800s"/"voting_period": "30s"/g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. -enable = true/g' $NODE_HOME/config/app.toml - -pwd -ls $NODE_HOME - -$COSMOVISOR run start --home $NODE_HOME --x-crisis-skip-assert-invariants >log.out 2>&1 & diff --git a/contrib/scripts/upgrade_test_scripts/run_upgrade_commands.sh b/contrib/scripts/upgrade_test_scripts/run_upgrade_commands.sh deleted file mode 100755 index 454cf07e..00000000 --- a/contrib/scripts/upgrade_test_scripts/run_upgrade_commands.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADES_DIR=$(realpath ./app/upgrades) -UPGRADE_VERSION_NUMBER=0 - -for dir in "$UPGRADES_DIR"/*; do - if [ -d "$dir" ]; then - DIR_NAME=$(basename "$dir") - VERSION_NUMBER="${DIR_NAME#v}" - if [ "$VERSION_NUMBER" -gt "$UPGRADE_VERSION_NUMBER" ]; then - UPGRADE_VERSION_NUMBER=$VERSION_NUMBER - fi - fi -done - -if [ -n "$UPGRADE_VERSION_NUMBER" ]; then - echo "Upgrade to version: $UPGRADE_VERSION_NUMBER" -else - echo "No upgrade version found in app/upgrades." -fi - -UPGRADE_VERSION=v$UPGRADE_VERSION_NUMBER -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then - echo "Need to add an upgrade height" - exit 1 -fi - -NODE_HOME=$(realpath ./build/.gaia) - -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -$BINARY version - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - - echo "wait 10 seconds for blockchain to start" - sleep 10 - - $BINARY config chain-id $CHAINID --home $NODE_HOME - $BINARY config output json --home $NODE_HOME - $BINARY config keyring-backend test --home $NODE_HOME - $BINARY config --home $NODE_HOME - - key=$($BINARY keys show val --home $NODE_HOME) - if [ -z "$key" ]; then - echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test - fi - - echo "\n" - echo "Submitting proposal... 
\n" - $BINARY tx gov submit-proposal software-upgrade $UPGRADE_VERSION \ - --title $UPGRADE_VERSION \ - --deposit 10000000uatom \ - --upgrade-height $UPGRADE_HEIGHT \ - --upgrade-info "upgrade" \ - --description "upgrade" \ - --fees 400uatom \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --node tcp://localhost:26657 \ - --yes - echo "Done \n" - - sleep 6 - echo "Casting vote... \n" - - $BINARY tx gov vote 1 yes \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --fees 400uatom \ - --node tcp://localhost:26657 \ - --yes - - echo "Done \n" - - $BINARY q gov proposals \ - --home $NODE_HOME \ - --node tcp://localhost:26657 - -else - echo "Please build old gaia binary and move to ./build/gaiadold" -fi diff --git a/contrib/scripts/upgrade_test_scripts/test_upgrade.sh b/contrib/scripts/upgrade_test_scripts/test_upgrade.sh deleted file mode 100755 index a3be8161..00000000 --- a/contrib/scripts/upgrade_test_scripts/test_upgrade.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -CNT=0 -ITER=$1 -SLEEP=$2 -NUMBLOCKS=$3 -NODEADDR=$4 - -if [ -z "$1" ]; then - echo "Invalid argument: missing number of iterations" - echo "sh test_upgrade.sh " - exit 1 -fi - -if [ -z "$2" ]; then - echo "Invalid argument: missing sleep duration" - echo "sh test_upgrade.sh " - exit 1 -fi - -if [ -z "$3" ]; then - echo "Invalid argument: missing number of blocks" - echo "sh test_upgrade.sh " - exit 1 -fi - -if [ -z "$4" ]; then - echo "Invalid argument: missing node address" - echo "sh test_upgrade.sh " - exit 1 -fi - -echo "running 'sh test_upgrade.sh iterations=$ITER sleep=$SLEEP num-blocks=$NUMBLOCKS node-address=$NODEADDR'" - -started=false -first_version="" - -while [ ${CNT} -lt $ITER ]; do - curr_block=$(curl -s $NODEADDR:26657/status | jq -r '.result.sync_info.latest_block_height') - curr_version=$(curl -s $NODEADDR:1317/cosmos/base/tendermint/v1beta1/node_info | jq -r '.application_version.version') - - - # tail v7.out - - if [[ $started = "false" && $curr_version != "" && $curr_version != "null" ]]; then - started=true - first_version=$curr_version - echo "First version: ${first_version}" - fi - - echo "count is ${CNT}, iteration ${ITER}, version is " $curr_version - - if [[ "$started" = "true" && ${curr_block} -gt ${NUMBLOCKS} && "$curr_version" != $first_version && "$curr_version" != "" && "$curr_version" != "null" ]]; then - echo "new version running" - exit 0 - fi - - if [[ ${curr_block} -gt ${NUMBLOCKS} ]]; then - echo "Failed: produced ${curr_block} without upgrading" - exit 1 - fi - - - CNT=$(($CNT+1)) - - sleep $SLEEP -done - -echo "Failed: timeout reached" -exit 1 diff --git a/contrib/scripts/upgrade_test_scripts/v10/run_gaia_v9.sh b/contrib/scripts/upgrade_test_scripts/v10/run_gaia_v9.sh deleted file mode 100644 index df666d2c..00000000 --- a/contrib/scripts/upgrade_test_scripts/v10/run_gaia_v9.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! 
test -f "./build/gaiad9"; then - echo "gaiad v9 does not exist" - exit -fi - - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiad9 "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - - -if ! test -f "./build/gaiad10"; then - echo "gaiad v10 does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/v10/bin -cp ./build/gaiad10 "$NODE_HOME"/cosmovisor/upgrades/v10/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json > "$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e 's/stake/uatom/g' $NODE_HOME/config/genesis.json -# min deposition amount (this one isn't working) -sed -i -e 's%"amount": "10000000",%"amount": "1",%g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's%"quorum": "0.334000000000000000",%"quorum": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's%"threshold": "0.500000000000000000",%"threshold": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's%"voting_period": "172800s"%"voting_period": "30s"%g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. 
-enable = true/g' $NODE_HOME/config/app.toml - -pwd -ls $NODE_HOME - -$COSMOVISOR run start --home $NODE_HOME --x-crisis-skip-assert-invariants > v9.out 2>&1 & diff --git a/contrib/scripts/upgrade_test_scripts/v10/run_upgrade_commands_v10.sh b/contrib/scripts/upgrade_test_scripts/v10/run_upgrade_commands_v10.sh deleted file mode 100644 index ceaa4fd7..00000000 --- a/contrib/scripts/upgrade_test_scripts/v10/run_upgrade_commands_v10.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then -echo "Need to add an upgrade height" -exit 1 -fi - -NODE_HOME=$(realpath ./build/.gaia) - -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -$BINARY version - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - -echo "wait 10 seconds for blockchain to start" -sleep 10 - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config output json --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -$BINARY config --home $NODE_HOME - - -key=$($BINARY keys show val --home $NODE_HOME) -if [ -z "$key" ]; then -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -fi - - -echo "\n" -echo "Submitting proposal... \n" -$BINARY tx gov submit-proposal software-upgrade v10 \ ---title v10 \ ---deposit 10000000uatom \ ---upgrade-height $UPGRADE_HEIGHT \ ---upgrade-info "upgrade to v10" \ ---description "upgrade to v10" \ ---gas auto \ ---fees 400uatom \ ---from val \ ---keyring-backend test \ ---chain-id $CHAINID \ ---home $NODE_HOME \ ---node tcp://localhost:26657 \ ---yes -echo "Done \n" - -sleep 6 -echo "Casting vote... \n" - -$BINARY tx gov vote 1 yes \ ---from val \ ---keyring-backend test \ ---chain-id $CHAINID \ ---home $NODE_HOME \ ---gas auto \ ---fees 400uatom \ ---node tcp://localhost:26657 \ ---yes - -echo "Done \n" - -else -echo "Please build gaia v9 and move to ./build/gaiad9" -fi diff --git a/contrib/scripts/upgrade_test_scripts/v11/run_gaia_v10.sh b/contrib/scripts/upgrade_test_scripts/v11/run_gaia_v10.sh deleted file mode 100755 index 69b183e9..00000000 --- a/contrib/scripts/upgrade_test_scripts/v11/run_gaia_v10.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! test -f "./build/gaiad10"; then - echo "gaiad v10 does not exist" - exit -fi - - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiad10 "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - - -if ! 
test -f "./build/gaiad11"; then - echo "gaiad v11 does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/v11/bin -cp ./build/gaiad11 "$NODE_HOME"/cosmovisor/upgrades/v11/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json > "$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e 's/stake/uatom/g' $NODE_HOME/config/genesis.json -# min deposition amount (this one isn't working) -sed -i -e 's%"amount": "10000000",%"amount": "1",%g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's%"quorum": "0.334000000000000000",%"quorum": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's%"threshold": "0.500000000000000000",%"threshold": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's%"voting_period": "172800s"%"voting_period": "30s"%g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. -enable = true/g' $NODE_HOME/config/app.toml - -pwd -ls $NODE_HOME - -$COSMOVISOR run start --home $NODE_HOME --x-crisis-skip-assert-invariants > v10.out 2>&1 & - diff --git a/contrib/scripts/upgrade_test_scripts/v11/run_upgrade_commands_v11.sh b/contrib/scripts/upgrade_test_scripts/v11/run_upgrade_commands_v11.sh deleted file mode 100755 index 8170378e..00000000 --- a/contrib/scripts/upgrade_test_scripts/v11/run_upgrade_commands_v11.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then - echo "Need to add an upgrade height" - exit 1 -fi - -NODE_HOME=$(realpath ./build/.gaia) - -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -$BINARY version - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - - echo "wait 10 seconds for blockchain to start" - sleep 10 - - $BINARY config chain-id $CHAINID --home $NODE_HOME - $BINARY config output json --home $NODE_HOME - $BINARY config keyring-backend test --home $NODE_HOME - $BINARY config --home $NODE_HOME - - - key=$($BINARY keys show val --home $NODE_HOME) - if [ -z "$key" ]; then - echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test - fi - - - echo "\n" - echo "Submitting proposal... 
\n" - $BINARY tx gov submit-proposal software-upgrade v11 \ - --title v11 \ - --deposit 10000000uatom \ - --upgrade-height $UPGRADE_HEIGHT \ - --upgrade-info "upgrade to v11" \ - --description "upgrade to v11" \ - --gas auto \ - --fees 400uatom \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --node tcp://localhost:26657 \ - --yes - echo "Done \n" - - sleep 6 - echo "Casting vote... \n" - - $BINARY tx gov vote 1 yes \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --gas auto \ - --fees 400uatom \ - --node tcp://localhost:26657 \ - --yes - - echo "Done \n" - -else - echo "Please build gaia v9 and move to ./build/gaiad9" -fi diff --git a/contrib/scripts/upgrade_test_scripts/v11/test_migration_v11.sh b/contrib/scripts/upgrade_test_scripts/v11/test_migration_v11.sh deleted file mode 100755 index 60ad1230..00000000 --- a/contrib/scripts/upgrade_test_scripts/v11/test_migration_v11.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -NODEADDR=$1 - -# Define default global fee module's params -# according to gaia/x/globalfee/types/params.go -default_globalfee_params=' -{ - "params": { - "minimum_gas_prices": [], - "bypass_min_fee_msg_types": [ - "/ibc.core.channel.v1.MsgRecvPacket", - "/ibc.core.channel.v1.MsgAcknowledgement", - "/ibc.core.client.v1.MsgUpdateClient", - "/ibc.core.channel.v1.MsgTimeout", - "/ibc.core.channel.v1.MsgTimeoutOnClose" - ], - "max_total_bypass_min_fee_msg_gas_usage": "1000000" - } -}' - -# Get current global fee default params -curr_params=$(curl -s $NODEADDR:1317/gaia/globalfee/v1beta1/params) - -# Check if retrieved params are equal to expected default params -DIFF=$(diff <(echo ${default_globalfee_params} | jq --sort-keys .) <(echo ${curr_params} | jq --sort-keys .)) - -if [ "$DIFF" != "" ] -then - printf "expected default global fee params:\n${DIFF}" - exit 1 -fi diff --git a/contrib/scripts/upgrade_test_scripts/v12/run_gaia_v11.sh b/contrib/scripts/upgrade_test_scripts/v12/run_gaia_v11.sh deleted file mode 100755 index 38040780..00000000 --- a/contrib/scripts/upgrade_test_scripts/v12/run_gaia_v11.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! test -f "./build/gaiad11"; then - echo "gaiad v11 does not exist" - exit -fi - - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiad11 "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - - -if ! test -f "./build/gaiad12"; then - echo "gaiad v12 does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/v12/bin -cp ./build/gaiad12 "$NODE_HOME"/cosmovisor/upgrades/v12/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' 
contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json > "$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e 's/stake/uatom/g' $NODE_HOME/config/genesis.json -# min deposition amount (this one isn't working) -sed -i -e 's%"amount": "10000000",%"amount": "1",%g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's%"quorum": "0.334000000000000000",%"quorum": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's%"threshold": "0.500000000000000000",%"threshold": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's%"voting_period": "172800s"%"voting_period": "30s"%g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. -enable = true/g' $NODE_HOME/config/app.toml - -pwd -ls $NODE_HOME - -$COSMOVISOR run start --home $NODE_HOME --x-crisis-skip-assert-invariants > v11.out 2>&1 & diff --git a/contrib/scripts/upgrade_test_scripts/v12/run_upgrade_commands_v12.sh b/contrib/scripts/upgrade_test_scripts/v12/run_upgrade_commands_v12.sh deleted file mode 100755 index 12c110f0..00000000 --- a/contrib/scripts/upgrade_test_scripts/v12/run_upgrade_commands_v12.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then -echo "Need to add an upgrade height" -exit 1 -fi - -NODE_HOME=$(realpath ./build/.gaia) - -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -$BINARY version - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - -echo "wait 10 seconds for blockchain to start" -sleep 10 - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config output json --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -$BINARY config --home $NODE_HOME - -key=$($BINARY keys show val --home $NODE_HOME) -if [ -z "$key" ]; then -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -fi - - -echo "\n" -echo "Submitting proposal... \n" -$BINARY tx gov submit-proposal software-upgrade v12 \ ---title v12 \ ---deposit 10000000uatom \ ---upgrade-height $UPGRADE_HEIGHT \ ---upgrade-info "upgrade to v12" \ ---description "upgrade to v12" \ ---fees 400uatom \ ---from val \ ---keyring-backend test \ ---chain-id $CHAINID \ ---home $NODE_HOME \ ---node tcp://localhost:26657 \ ---yes -echo "Done \n" - -sleep 6 -echo "Casting vote... 
\n" - -$BINARY tx gov vote 1 yes \ ---from val \ ---keyring-backend test \ ---chain-id $CHAINID \ ---home $NODE_HOME \ ---fees 400uatom \ ---node tcp://localhost:26657 \ ---yes - -echo "Done \n" - -else -echo "Please build gaia v11 and move to ./build/gaiad11" -fi diff --git a/contrib/scripts/upgrade_test_scripts/v8/run-gaia-v7.sh b/contrib/scripts/upgrade_test_scripts/v8/run-gaia-v7.sh deleted file mode 100755 index b91ee0ad..00000000 --- a/contrib/scripts/upgrade_test_scripts/v8/run-gaia-v7.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! test -f "./build/gaiad7"; then - echo "gaiad v7 does not exist" - exit -fi - - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiad7 "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - - -if ! test -f "./build/gaiad8"; then - echo "gaiad v8 does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/v8-Rho/bin -cp ./build/gaiad8 "$NODE_HOME"/cosmovisor/upgrades/v8-Rho/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json > "$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e 's/stake/uatom/g' $NODE_HOME/config/genesis.json -# min deposition amount (this one isn't working) -sed -i -e 's%"amount": "10000000",%"amount": "1",%g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's%"quorum": "0.334000000000000000",%"quorum": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's%"threshold": "0.500000000000000000",%"threshold": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's%"voting_period": "172800s"%"voting_period": "30s"%g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. 
-enable = true/g' $NODE_HOME/config/app.toml - -$COSMOVISOR start --home $NODE_HOME --x-crisis-skip-assert-invariants - diff --git a/contrib/scripts/upgrade_test_scripts/v8/run-upgrade-commands-v8-rho.sh b/contrib/scripts/upgrade_test_scripts/v8/run-upgrade-commands-v8-rho.sh deleted file mode 100755 index a0419c6c..00000000 --- a/contrib/scripts/upgrade_test_scripts/v8/run-upgrade-commands-v8-rho.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then - echo "Need to add an upgrade height" - exit 1 -fi - -# NODE_HOME=./build/.gaia -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - - echo "wait 10 seconds for blockchain to start" - sleep 10 - - $BINARY config chain-id $CHAINID --home $NODE_HOME - $BINARY config output json --home $NODE_HOME - $BINARY config keyring-backend test --home $NODE_HOME - $BINARY config --home $NODE_HOME - - - key=$($BINARY keys show val --home $NODE_HOME) - if [ key == "" ]; then - echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test - fi - - # $BINARY keys list --home $NODE_HOME - - echo "\n" - echo "Submitting proposal... \n" - $BINARY tx gov submit-proposal software-upgrade v8-Rho \ - --title v8-Rho \ - --deposit 10000000uatom \ - --upgrade-height $UPGRADE_HEIGHT \ - --upgrade-info "upgrade to v8-Rho" \ - --description "upgrade to v8-Rho" \ - --gas auto \ - --fees 400uatom \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --node tcp://localhost:26657 \ - --yes - echo "Done \n" - - sleep 6 - echo "Casting vote... \n" - - $BINARY tx gov vote 1 yes \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --gas auto \ - --fees 400uatom \ - --node tcp://localhost:26657 \ - --yes - - echo "Done \n" - -else - echo "Please build gaia v7 and move to ./build/gaiad7" -fi \ No newline at end of file diff --git a/contrib/scripts/upgrade_test_scripts/v9/run-gaia-v8.sh b/contrib/scripts/upgrade_test_scripts/v9/run-gaia-v8.sh deleted file mode 100755 index 83c0cab1..00000000 --- a/contrib/scripts/upgrade_test_scripts/v9/run-gaia-v8.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" -CHAINID=cosmoshub-4 - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - -if ! test -f "./build/gaiad8"; then - echo "gaiad v8 does not exist" - exit -fi - - -rm -rf ./build/.gaia - -mkdir -p "$NODE_HOME"/cosmovisor/genesis/bin -cp ./build/gaiad8 "$NODE_HOME"/cosmovisor/genesis/bin/gaiad -$BINARY init upgrader --chain-id $CHAINID --home "$NODE_HOME" - - -if ! 
test -f "./build/gaiad9"; then - echo "gaiad v9 does not exist" - exit -fi - -mkdir -p "$NODE_HOME"/cosmovisor/upgrades/v9-lambda/bin -cp ./build/gaiad9 "$NODE_HOME"/cosmovisor/upgrades/v9-lambda/bin/gaiad - -GOPATH=$(go env GOPATH) - -export DAEMON_NAME=gaiad -export DAEMON_HOME=$NODE_HOME -COSMOVISOR=$GOPATH/bin/cosmovisor - - -$BINARY config chain-id $CHAINID --home $NODE_HOME -$BINARY config keyring-backend test --home $NODE_HOME -tmp=$(mktemp) - -# add bank part of genesis -jq --argjson foo "$(jq -c '.' contrib/denom.json)" '.app_state.bank.denom_metadata = $foo' $NODE_HOME/config/genesis.json > "$tmp" && mv "$tmp" $NODE_HOME/config/genesis.json - -# replace default stake token with uatom -sed -i -e 's/stake/uatom/g' $NODE_HOME/config/genesis.json -# min deposition amount (this one isn't working) -sed -i -e 's%"amount": "10000000",%"amount": "1",%g' $NODE_HOME/config/genesis.json -# min voting power that a proposal requires in order to be a valid proposal -sed -i -e 's%"quorum": "0.334000000000000000",%"quorum": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# the minimum proportion of "yes" votes requires for the proposal to pass -sed -i -e 's%"threshold": "0.500000000000000000",%"threshold": "0.000000000000000001",%g' $NODE_HOME/config/genesis.json -# voting period to 30s -sed -i -e 's%"voting_period": "172800s"%"voting_period": "30s"%g' $NODE_HOME/config/genesis.json - -echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test -$BINARY add-genesis-account val 10000000000000000000000000uatom --home $NODE_HOME --keyring-backend test -$BINARY gentx val 1000000000uatom --home $NODE_HOME --chain-id $CHAINID -$BINARY collect-gentxs --home $NODE_HOME - -sed -i.bak'' 's/minimum-gas-prices = ""/minimum-gas-prices = "0uatom"/' $NODE_HOME/config/app.toml - -perl -i~ -0777 -pe 's/# Enable defines if the API server should be enabled. -enable = false/# Enable defines if the API server should be enabled. -enable = true/g' $NODE_HOME/config/app.toml - -$COSMOVISOR run start --home $NODE_HOME --x-crisis-skip-assert-invariants - diff --git a/contrib/scripts/upgrade_test_scripts/v9/run-upgrade-commands.sh b/contrib/scripts/upgrade_test_scripts/v9/run-upgrade-commands.sh deleted file mode 100755 index fa8b5922..00000000 --- a/contrib/scripts/upgrade_test_scripts/v9/run-upgrade-commands.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh - -set -o errexit -o nounset - -UPGRADE_HEIGHT=$1 - -if [ -z "$1" ]; then - echo "Need to add an upgrade height" - exit 1 -fi - -# NODE_HOME=./build/.gaia -NODE_HOME=$(realpath ./build/.gaia) -echo "NODE_HOME = ${NODE_HOME}" - -BINARY=$NODE_HOME/cosmovisor/genesis/bin/gaiad -echo "BINARY = ${BINARY}" - -USER_MNEMONIC="abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" -CHAINID=cosmoshub-4 - -if test -f "$BINARY"; then - - echo "wait 10 seconds for blockchain to start" - sleep 10 - - $BINARY config chain-id $CHAINID --home $NODE_HOME - $BINARY config output json --home $NODE_HOME - $BINARY config keyring-backend test --home $NODE_HOME - $BINARY config --home $NODE_HOME - - - key=$($BINARY keys show val --home $NODE_HOME) - if [ -z "$key" ]; then - echo $USER_MNEMONIC | $BINARY --home $NODE_HOME keys add val --recover --keyring-backend=test - fi - - - echo "\n" - echo "Submitting proposal... 
\n" - $BINARY tx gov submit-proposal software-upgrade v9-Lambda \ - --title v9-Lambda \ - --deposit 10000000uatom \ - --upgrade-height $UPGRADE_HEIGHT \ - --upgrade-info "upgrade to v9-Lambda" \ - --description "upgrade to v9-Lambda" \ - --gas auto \ - --fees 400uatom \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --node tcp://localhost:26657 \ - --yes - echo "Done \n" - - sleep 6 - echo "Casting vote... \n" - - $BINARY tx gov vote 1 yes \ - --from val \ - --keyring-backend test \ - --chain-id $CHAINID \ - --home $NODE_HOME \ - --gas auto \ - --fees 400uatom \ - --node tcp://localhost:26657 \ - --yes - - echo "Done \n" - -else - echo "Please build gaia v8 and move to ./build/gaiad8" -fi diff --git a/contrib/single-node.sh b/contrib/single-node.sh index ae22fb1a..1b3eb87b 100755 --- a/contrib/single-node.sh +++ b/contrib/single-node.sh @@ -17,20 +17,20 @@ fi # Build genesis file incl account for passed address coins="10000000000stake,100000000000samoleans" -gaiad init --chain-id $CHAINID $CHAINID -gaiad keys add validator --keyring-backend="test" -gaiad add-genesis-account $(gaiad keys show validator -a --keyring-backend="test") $coins -gaiad add-genesis-account $GENACCT $coins -gaiad gentx validator 5000000000stake --keyring-backend="test" --chain-id $CHAINID -gaiad collect-gentxs +atomoned init --chain-id $CHAINID $CHAINID +atomoned keys add validator --keyring-backend="test" +atomoned add-genesis-account $(atomoned keys show validator -a --keyring-backend="test") $coins +atomoned add-genesis-account $GENACCT $coins +atomoned gentx validator 5000000000stake --keyring-backend="test" --chain-id $CHAINID +atomoned collect-gentxs # Set proper defaults and change ports echo "Setting rpc listen address" -sed -i '' 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:26657"#g' ~/.gaia/config/config.toml +sed -i '' 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:26657"#g' ~/.atomone/config/config.toml echo 2 -sed -i '' 's/timeout_commit = "5s"/timeout_commit = "1s"/g' ~/.gaia/config/config.toml -sed -i '' 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ~/.gaia/config/config.toml -sed -i '' 's/index_all_keys = false/index_all_keys = true/g' ~/.gaia/config/config.toml +sed -i '' 's/timeout_commit = "5s"/timeout_commit = "1s"/g' ~/.atomone/config/config.toml +sed -i '' 's/timeout_propose = "3s"/timeout_propose = "1s"/g' ~/.atomone/config/config.toml +sed -i '' 's/index_all_keys = false/index_all_keys = true/g' ~/.atomone/config/config.toml -# Start the gaia -gaiad start --pruning=nothing +# Start the atomone +atomoned start --pruning=nothing diff --git a/contrib/statesync.bash b/contrib/statesync.bash deleted file mode 100644 index 634c5696..00000000 --- a/contrib/statesync.bash +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# microtick and bitcanna contributed significantly here. -# Pebbledb state sync script. -# invoke like: bash contrib/statesync.bash - -## USAGE RUNDOWN -# Not for use on live nodes -# For use when testing. - -set -uxe - -# Set Golang environment variables. -# ! Adapt as required, depending on your system configuration -export GOPATH=~/go -export PATH=$PATH:~/go/bin - -# Install with pebbledb (uncomment for incredible performance) -# go mod edit -replace github.com/tendermint/tm-db=github.com/baabeetaa/tm-db@pebble -# go mod tidy - -# go install -ldflags '-w -s -X github.com/cosmos/cosmos-sdk/types.DBBackend=pebbledb -X github.com/tendermint/tm-db.ForceSync=1' -tags pebbledb ./... 
-
-# plain install (comment this out if using the pebbledb build above)
-# go install ./...
-
-# NOTE: ABOVE YOU CAN USE ALTERNATIVE DATABASES, HERE ARE THE EXACT COMMANDS
-# go install -ldflags '-w -s -X github.com/cosmos/cosmos-sdk/types.DBBackend=rocksdb' -tags rocksdb ./...
-# go install -ldflags '-w -s -X github.com/cosmos/cosmos-sdk/types.DBBackend=badgerdb' -tags badgerdb ./...
-# go install -ldflags '-w -s -X github.com/cosmos/cosmos-sdk/types.DBBackend=boltdb' -tags boltdb ./...
-# go install -ldflags '-w -s -X github.com/cosmos/cosmos-sdk/types.DBBackend=pebbledb -X github.com/tendermint/tm-db.ForceSync=1' -tags pebbledb ./...
-
-
-# Initialize chain.
-gaiad init test
-
-# Get Genesis
-wget https://github.com/cosmos/mainnet/raw/master/genesis/genesis.cosmoshub-4.json.gz
-gzip -d genesis.cosmoshub-4.json.gz
-mv genesis.cosmoshub-4.json ~/.gaia/config/genesis.json
-
-# Get "trust_hash" and "trust_height".
-INTERVAL=100
-LATEST_HEIGHT=$(curl -s https://cosmos-rpc.polkachu.com/block | jq -r .result.block.header.height)
-BLOCK_HEIGHT=$((LATEST_HEIGHT - INTERVAL))
-TRUST_HASH=$(curl -s "https://cosmos-rpc.polkachu.com/block?height=$BLOCK_HEIGHT" | jq -r .result.block_id.hash)
-
-# Print out the block height and hash from which to sync state.
-echo "trust_height: $BLOCK_HEIGHT"
-echo "trust_hash: $TRUST_HASH"
-
-# Export state sync variables.
-export GAIAD_STATESYNC_ENABLE=true
-export GAIAD_P2P_MAX_NUM_OUTBOUND_PEERS=200
-export GAIAD_STATESYNC_RPC_SERVERS="https://cosmos-rpc.polkachu.com:443,https://rpc-cosmoshub-ia.cosmosia.notional.ventures:443"
-export GAIAD_STATESYNC_TRUST_HEIGHT=$BLOCK_HEIGHT
-export GAIAD_STATESYNC_TRUST_HASH=$TRUST_HASH
-
-# Fetch and set list of seeds from chain registry.
-GAIAD_P2P_SEEDS=$(curl -s https://raw.githubusercontent.com/cosmos/chain-registry/master/cosmoshub/chain.json | jq -r '[foreach .peers.seeds[] as $item (""; "\($item.id)@\($item.address)")] | join(",")')
-export GAIAD_P2P_SEEDS
-
-# Start chain.
-gaiad start --x-crisis-skip-assert-invariants --iavl-disable-fastnode false
diff --git a/contrib/testnets/Makefile b/contrib/testnets/Makefile
deleted file mode 100644
index 36db88f4..00000000
--- a/contrib/testnets/Makefile
+++ /dev/null
@@ -1,143 +0,0 @@
-########################################
-### These targets were broken out of the main Makefile to enable easy setup of testnets.
-### They use a form of terraform + ansible to build full nodes in AWS.
-### The shell scripts in this folder are example uses of the targets.
-
-# Name of the testnet. Used in chain-id.
-TESTNET_NAME?=remotenet
-
-# Name of the servers grouped together for management purposes. Used in tagging the servers in the cloud.
-CLUSTER_NAME?=$(TESTNET_NAME)
-
-# Number of servers to put in one availability zone in AWS.
-SERVERS?=1
-
-# Number of regions to use in AWS. One region usually contains 2-3 availability zones.
-REGION_LIMIT?=1
-
-# Path to gaiad for deployment. Must be a Linux binary.
-BINARY?=$(CURDIR)/../build/gaiad
-GAIACLI_BINARY?=$(CURDIR)/../build/gaiacli
-
-# Path to the genesis.json and config.toml files to deploy on full nodes.
-GENESISFILE?=$(CURDIR)/../build/genesis.json
-CONFIGFILE?=$(CURDIR)/../build/config.toml
-
-# Name of application for app deployments
-APP_NAME ?= faucettestnet1
-# Region to deploy VPC and application in AWS
-REGION ?= us-east-2
-
-all:
-	@echo "There is no all. Only sum of the ones."
-
-disclaimer:
-	@echo "WARNING: These are example network configuration scripts only and have not undergone security review. 
They should not be used for production deployments." - -######################################## -### Extract genesis.json and config.toml from a node in a cluster - -extract-config: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - cd remote/ansible && \ - ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook \ - -i inventory/ec2.py \ - -l "tag_Environment_$(CLUSTER_NAME)" \ - -b -u centos \ - -e TESTNET_NAME="$(TESTNET_NAME)" \ - -e GENESISFILE="$(GENESISFILE)" \ - -e CONFIGFILE="$(CONFIGFILE)" \ - extract-config.yml - - -######################################## -### Remote validator nodes using terraform and ansible in AWS - -validators-start: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/terraform-aws && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" -var REGION_LIMIT="$(REGION_LIMIT)" - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" setup-validators.yml - cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b start.yml - -validators-stop: disclaimer - cd remote/terraform-aws && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)" - rm -rf remote/ansible/keys/ remote/ansible/files/ - -validators-status: disclaimer - cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" status.yml - -#validators-clear: -# cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b clear-config.yml - - -######################################## -### Remote full nodes using terraform and ansible in Amazon AWS - -fullnodes-start: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." 
; false ; fi - cd remote/terraform-aws && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" -var REGION_LIMIT="$(REGION_LIMIT)" - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml - cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b start.yml - -fullnodes-stop: disclaimer - cd remote/terraform-aws && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)" - rm -rf remote/ansible/keys/ remote/ansible/files/ - -fullnodes-status: disclaimer - cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" status.yml - -######################################## -### Other calls - -upgrade-gaiad: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) upgrade-gaiad.yml - -UNSAFE_RESET_ALL?=no -upgrade-seeds: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - @if [ -z "`file $(GAIACLI_BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e BINARY=$(BINARY) -e GAIACLI_BINARY=$(GAIACLI_BINARY) -e UNSAFE_RESET_ALL=$(UNSAFE_RESET_ALL) upgrade-gaia.yml - - -list: - remote/ansible/inventory/ec2.py | python -c 'import json,sys ; print "\n".join(json.loads("".join(sys.stdin.readlines()))["tag_Environment_$(CLUSTER_NAME)"])' - -install-datadog: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. - @if [ -z "$(DD_API_KEY)" ]; then echo "DD_API_KEY environment variable not set." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b -e DD_API_KEY="$(DD_API_KEY)" -e TESTNET_NAME="$(TESTNET_NAME)" -e CLUSTER_NAME="$(CLUSTER_NAME)" install-datadog-agent.yml - -remove-datadog: disclaimer - #Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access. 
-	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(CLUSTER_NAME)" -u centos -b remove-datadog-agent.yml
-
-
-########################################
-### Application infrastructure setup
-
-app-start: disclaimer
-	#Make sure you have AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY or your IAM roles set for AWS API access.
-	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
-	@if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi
-	cd remote/terraform-app && terraform init && (terraform workspace new "$(APP_NAME)" || terraform workspace select "$(APP_NAME)") && terraform apply -auto-approve -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var APP_NAME="$(APP_NAME)" -var SERVERS="$(SERVERS)" -var REGION="$(REGION)"
-	cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(APP_NAME)" -u centos -b -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml
-	cd remote/ansible && ansible-playbook -i inventory/ec2.py -l "tag_Environment_$(APP_NAME)" -u centos -b start.yml
-
-app-stop: disclaimer
-	cd remote/terraform-app && terraform workspace select "$(APP_NAME)" && terraform destroy -force -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var APP_NAME=$(APP_NAME) && terraform workspace select default && terraform workspace delete "$(APP_NAME)"
-	rm -rf remote/ansible/keys/ remote/ansible/files/
-
-# To avoid unintended conflicts with file names, always add to .PHONY
-# unless there is a reason not to.
-# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: all extract-config validators-start validators-stop validators-status fullnodes-start fullnodes-stop fullnodes-status upgrade-gaiad list install-datadog remove-datadog app-start app-stop
diff --git a/contrib/testnets/README.md b/contrib/testnets/README.md
deleted file mode 100644
index f4a224f0..00000000
--- a/contrib/testnets/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Networks
-
-This directory contains the files required for automated deployment of either local or remote testnets.
-
-Deployment is best accomplished using the `make` targets. For more information, see the
-[networks documentation](../docs/hub-tutorials/deploy-testnet.md)
diff --git a/contrib/testnets/add-cluster.sh b/contrib/testnets/add-cluster.sh
deleted file mode 100755
index a8936a09..00000000
--- a/contrib/testnets/add-cluster.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-# add-cluster - example make call to add a set of nodes to an existing testnet in AWS
-# WARNING: Run it from the current directory - it uses relative paths to ship the binary and the genesis.json and config.toml files
-
-if [ $# -ne 4 ]; then
-	echo "Usage: ./add-cluster.sh <testnet_name> <cluster_name> <region_limit> <servers>"
-	exit 1
-fi
-set -eux
-
-# The testnet name is the same on all nodes
-export TESTNET_NAME=$1
-export CLUSTER_NAME=$2
-export REGION_LIMIT=$3
-export SERVERS=$4
-
-# Build the AWS full nodes
-rm -rf remote/ansible/keys
-make fullnodes-start
-
-# Save the private key seed words from the nodes
-SEEDFOLDER="${TESTNET_NAME}-${CLUSTER_NAME}-seedwords"
-mkdir -p "${SEEDFOLDER}"
-test ! 
-f "${SEEDFOLDER}/node0" && mv remote/ansible/keys/* "${SEEDFOLDER}" - diff --git a/contrib/testnets/add-datadog.sh b/contrib/testnets/add-datadog.sh deleted file mode 100755 index 6432cc9e..00000000 --- a/contrib/testnets/add-datadog.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# add-datadog - add datadog agent to a set of nodes - -if [ $# -ne 2 ]; then - echo "Usage: ./add-datadog.sh " - exit 1 -fi -set -eux - -export TESTNET_NAME=$1 -export CLUSTER_NAME=$2 - -make install-datadog - diff --git a/contrib/testnets/del-cluster.sh b/contrib/testnets/del-cluster.sh deleted file mode 100755 index 0c4dec8d..00000000 --- a/contrib/testnets/del-cluster.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# del-cluster - example make call to delete a set of nodes on an existing testnet in AWS - -if [ $# -ne 1 ]; then - echo "Usage: ./add-cluster.sh " - exit 1 -fi -set -eux - -export CLUSTER_NAME=$1 - -# Delete the AWS nodes -make fullnodes-stop - diff --git a/contrib/testnets/del-datadog.sh b/contrib/testnets/del-datadog.sh deleted file mode 100755 index c9bf3352..00000000 --- a/contrib/testnets/del-datadog.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -# del-datadog - aremove datadog agent from a set of nodes - -if [ $# -ne 1 ]; then - echo "Usage: ./del-datadog.sh " - exit 1 -fi -set -eux - -export CLUSTER_NAME=$1 - -make remove-datadog - diff --git a/contrib/testnets/list.sh b/contrib/testnets/list.sh deleted file mode 100755 index fd1b132f..00000000 --- a/contrib/testnets/list.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -# list - list the IPs of a set of nodes - -if [ $# -ne 1 ]; then - echo "Usage: ./list.sh " - exit 1 -fi -set -eux - -export CLUSTER_NAME=$1 - -make list - diff --git a/contrib/testnets/new-testnet.sh b/contrib/testnets/new-testnet.sh deleted file mode 100755 index ae7b73de..00000000 --- a/contrib/testnets/new-testnet.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh -# new-testnet - example make call to create a new set of validator nodes in AWS -# WARNING: Run it from the current directory - it uses relative paths to ship the binary - -if [ $# -ne 4 ]; then - echo "Usage: ./new-testnet.sh " - exit 1 -fi -set -eux - -if [ -z "`file ../build/gaiad | grep 'ELF 64-bit'`" ]; then - # Build the linux binary we're going to ship to the nodes - make -C .. build-linux -fi - -# The testnet name is the same on all nodes -export TESTNET_NAME=$1 -export CLUSTER_NAME=$2 -export REGION_LIMIT=$3 -export SERVERS=$4 - -# Build the AWS validator nodes and extract the genesis.json and config.toml from one of them -rm -rf remote/ansible/keys -make validators-start extract-config - -# Save the private key seed words from the validators -SEEDFOLDER="${TESTNET_NAME}-${CLUSTER_NAME}-seedwords" -mkdir -p "${SEEDFOLDER}" -test ! 
-f "${SEEDFOLDER}/node0" && mv remote/ansible/keys/* "${SEEDFOLDER}" - diff --git a/contrib/testnets/remote/ansible/.gitignore b/contrib/testnets/remote/ansible/.gitignore deleted file mode 100644 index bebb9186..00000000 --- a/contrib/testnets/remote/ansible/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.retry -files/* -keys/* diff --git a/contrib/testnets/remote/ansible/add-lcd.yml b/contrib/testnets/remote/ansible/add-lcd.yml deleted file mode 100644 index bdc07034..00000000 --- a/contrib/testnets/remote/ansible/add-lcd.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - add-lcd - diff --git a/contrib/testnets/remote/ansible/clear-config.yml b/contrib/testnets/remote/ansible/clear-config.yml deleted file mode 100644 index 80831e75..00000000 --- a/contrib/testnets/remote/ansible/clear-config.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - clear-config - diff --git a/contrib/testnets/remote/ansible/extract-config.yml b/contrib/testnets/remote/ansible/extract-config.yml deleted file mode 100644 index d901bb69..00000000 --- a/contrib/testnets/remote/ansible/extract-config.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - extract-config - diff --git a/contrib/testnets/remote/ansible/increase-openfiles.yml b/contrib/testnets/remote/ansible/increase-openfiles.yml deleted file mode 100644 index 1adcb821..00000000 --- a/contrib/testnets/remote/ansible/increase-openfiles.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - increase-openfiles - diff --git a/contrib/testnets/remote/ansible/install-datadog-agent.yml b/contrib/testnets/remote/ansible/install-datadog-agent.yml deleted file mode 100644 index b88600ea..00000000 --- a/contrib/testnets/remote/ansible/install-datadog-agent.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -#DD_API_KEY,TESTNET_NAME,CLUSTER_NAME required - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - setup-journald - - install-datadog-agent - - update-datadog-agent - diff --git a/contrib/testnets/remote/ansible/inventory/COPYING b/contrib/testnets/remote/ansible/inventory/COPYING deleted file mode 100644 index 10926e87..00000000 --- a/contrib/testnets/remote/ansible/inventory/COPYING +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
-
diff --git a/contrib/testnets/remote/ansible/inventory/digital_ocean.ini b/contrib/testnets/remote/ansible/inventory/digital_ocean.ini
deleted file mode 100644
index b809554b..00000000
--- a/contrib/testnets/remote/ansible/inventory/digital_ocean.ini
+++ /dev/null
@@ -1,34 +0,0 @@
-# Ansible DigitalOcean external inventory script settings
-#
-
-[digital_ocean]
-
-# The module needs your DigitalOcean API Token.
-# It may also be specified on the command line via --api-token
-# or via the environment variables DO_API_TOKEN or DO_API_KEY
-
-#api_token = 123456abcdefg
-
-
-# API calls to DigitalOcean may be slow. For this reason, we cache the results
-# of an API call. Set this to the path you want cache files to be written to.
-# One file will be written to this directory:
-#   - ansible-digital_ocean.cache
-#
-cache_path = /tmp
-
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-#
-cache_max_age = 300
-
-# Use the private network IP address instead of the public when available.
-#
-use_private_network = False
-
-# Pass variables to every group, e.g.:
-#
-#   group_variables = { 'ansible_user': 'root' }
-#
-group_variables = {}
diff --git a/contrib/testnets/remote/ansible/inventory/digital_ocean.py b/contrib/testnets/remote/ansible/inventory/digital_ocean.py
deleted file mode 100755
index 24ba6437..00000000
--- a/contrib/testnets/remote/ansible/inventory/digital_ocean.py
+++ /dev/null
@@ -1,471 +0,0 @@
-#!/usr/bin/env python
-
-'''
-DigitalOcean external inventory script
-======================================
-
-Generates Ansible inventory of DigitalOcean Droplets.
-
-In addition to the --list and --host options used by Ansible, there are options
-for generating JSON of other DigitalOcean data. This is useful when creating
-droplets. For example, --regions will return all the DigitalOcean Regions.
-This information can also be easily found in the cache file, whose default
-location is /tmp/ansible-digital_ocean.cache.
-
-The --pretty (-p) option pretty-prints the output for better human readability.
-
-----
-Although the cache stores all the information received from DigitalOcean,
-the cache is not used for current droplet information (in --list, --host,
---all, and --droplets). This is so that accurate droplet information is always
-found. You can force this script to use the cache with --force-cache.
-
-----
-Configuration is read from `digital_ocean.ini`, then from environment variables,
-and then from command-line arguments.
-
-Most notably, the DigitalOcean API Token must be specified. It can be specified
-in the INI file or with the following environment variables:
-    export DO_API_TOKEN='abc123' or
-    export DO_API_KEY='abc123'
-
-Alternatively, it can be passed on the command-line with --api-token.
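-
-For example (an illustrative invocation; the token value is a placeholder):
-    export DO_API_TOKEN='abc123'
-    ./digital_ocean.py --list --pretty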
-
-If you specify DigitalOcean credentials in the INI file, a handy way to
-get them into your environment (e.g., to use the digital_ocean module)
-is to use the output of the --env option with export:
-    export $(digital_ocean.py --env)
-
-----
-The following groups are generated from --list:
- - ID (droplet ID)
- - NAME (droplet NAME)
- - image_ID
- - image_NAME
- - distro_NAME (distribution NAME from image)
- - region_NAME
- - size_NAME
- - status_STATUS
-
-For each host, the following variables are registered:
- - do_backup_ids
- - do_created_at
- - do_disk
- - do_features - list
- - do_id
- - do_image - object
- - do_ip_address
- - do_private_ip_address
- - do_kernel - object
- - do_locked
- - do_memory
- - do_name
- - do_networks - object
- - do_next_backup_window
- - do_region - object
- - do_size - object
- - do_size_slug
- - do_snapshot_ids - list
- - do_status
- - do_tags
- - do_vcpus
- - do_volume_ids
-
------
-```
-usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
-                        [--droplets] [--regions] [--images] [--sizes]
-                        [--ssh-keys] [--domains] [--pretty]
-                        [--cache-path CACHE_PATH]
-                        [--cache-max_age CACHE_MAX_AGE]
-                        [--force-cache]
-                        [--refresh-cache]
-                        [--api-token API_TOKEN]
-
-Produce an Ansible Inventory file based on DigitalOcean credentials
-
-optional arguments:
-  -h, --help            show this help message and exit
-  --list                List all active Droplets as Ansible inventory
-                        (default: True)
-  --host HOST           Get all Ansible inventory variables about a specific
-                        Droplet
-  --all                 List all DigitalOcean information as JSON
-  --droplets            List Droplets as JSON
-  --regions             List Regions as JSON
-  --images              List Images as JSON
-  --sizes               List Sizes as JSON
-  --ssh-keys            List SSH keys as JSON
-  --domains             List Domains as JSON
-  --pretty, -p          Pretty-print results
-  --cache-path CACHE_PATH
-                        Path to the cache files (default: .)
-  --cache-max_age CACHE_MAX_AGE
-                        Maximum age of the cached items (default: 0)
-  --force-cache         Only use data from the cache
-  --refresh-cache       Force refresh of cache by making API requests to
-                        DigitalOcean (default: False - use cache files)
-  --api-token API_TOKEN, -a API_TOKEN
-                        DigitalOcean API Token
-```
-
-'''
-
-# (c) 2013, Evan Wies <evan@neomantra.net>
-#
-# Inspired by the EC2 inventory plugin:
-# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- -###################################################################### - -import os -import sys -import re -import argparse -from time import time -import ConfigParser -import ast - -try: - import json -except ImportError: - import simplejson as json - -try: - from dopy.manager import DoManager -except ImportError as e: - sys.exit("failed=True msg='`dopy` library required for this script'") - - -class DigitalOceanInventory(object): - - ########################################################################### - # Main execution path - ########################################################################### - - def __init__(self): - ''' Main execution path ''' - - # DigitalOceanInventory data - self.data = {} # All DigitalOcean data - self.inventory = {} # Ansible Inventory - - # Define defaults - self.cache_path = '.' - self.cache_max_age = 0 - self.use_private_network = False - self.group_variables = {} - - # Read settings, environment variables, and CLI arguments - self.read_settings() - self.read_environment() - self.read_cli_args() - - # Verify credentials were set - if not hasattr(self, 'api_token'): - sys.stderr.write('''Could not find values for DigitalOcean api_token. -They must be specified via either ini file, command line argument (--api-token), -or environment variables (DO_API_TOKEN)\n''') - sys.exit(-1) - - # env command, show DigitalOcean credentials - if self.args.env: - print("DO_API_TOKEN=%s" % self.api_token) - sys.exit(0) - - # Manage cache - self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" - self.cache_refreshed = False - - if self.is_cache_valid(): - self.load_from_cache() - if len(self.data) == 0: - if self.args.force_cache: - sys.stderr.write('''Cache is empty and --force-cache was specified\n''') - sys.exit(-1) - - self.manager = DoManager(None, self.api_token, api_version=2) - - # Pick the json_data to print based on the CLI command - if self.args.droplets: - self.load_from_digital_ocean('droplets') - json_data = {'droplets': self.data['droplets']} - elif self.args.regions: - self.load_from_digital_ocean('regions') - json_data = {'regions': self.data['regions']} - elif self.args.images: - self.load_from_digital_ocean('images') - json_data = {'images': self.data['images']} - elif self.args.sizes: - self.load_from_digital_ocean('sizes') - json_data = {'sizes': self.data['sizes']} - elif self.args.ssh_keys: - self.load_from_digital_ocean('ssh_keys') - json_data = {'ssh_keys': self.data['ssh_keys']} - elif self.args.domains: - self.load_from_digital_ocean('domains') - json_data = {'domains': self.data['domains']} - elif self.args.all: - self.load_from_digital_ocean() - json_data = self.data - elif self.args.host: - json_data = self.load_droplet_variables_for_host() - else: # '--list' this is last to make it default - self.load_from_digital_ocean('droplets') - self.build_inventory() - json_data = self.inventory - - if self.cache_refreshed: - self.write_to_cache() - - if self.args.pretty: - print(json.dumps(json_data, sort_keys=True, indent=2)) - else: - print(json.dumps(json_data)) - # That's all she wrote... 
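The cache handling in `__init__` above defers to `is_cache_valid()` (defined further down), which boils down to a file-mtime comparison. A standalone sketch of that freshness test, with a hypothetical helper name:

```python
# Standalone sketch of the mtime-based freshness test the script uses:
# a cache file is still valid while (mtime + max_age) lies in the future.
import os
from time import time


def cache_is_fresh(path, max_age_seconds):
    """Hypothetical helper mirroring the script's is_cache_valid()."""
    if not os.path.isfile(path):
        return False
    return (os.path.getmtime(path) + max_age_seconds) > time()


# e.g. cache_is_fresh('/tmp/ansible-digital_ocean.cache', 300)
```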
- - ########################################################################### - # Script configuration - ########################################################################### - - def read_settings(self): - ''' Reads the settings from the digital_ocean.ini file ''' - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') - - # Credentials - if config.has_option('digital_ocean', 'api_token'): - self.api_token = config.get('digital_ocean', 'api_token') - - # Cache related - if config.has_option('digital_ocean', 'cache_path'): - self.cache_path = config.get('digital_ocean', 'cache_path') - if config.has_option('digital_ocean', 'cache_max_age'): - self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') - - # Private IP Address - if config.has_option('digital_ocean', 'use_private_network'): - self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') - - # Group variables - if config.has_option('digital_ocean', 'group_variables'): - self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) - - def read_environment(self): - ''' Reads the settings from environment variables ''' - # Setup credentials - if os.getenv("DO_API_TOKEN"): - self.api_token = os.getenv("DO_API_TOKEN") - if os.getenv("DO_API_KEY"): - self.api_token = os.getenv("DO_API_KEY") - - def read_cli_args(self): - ''' Command line argument processing ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') - - parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') - parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') - - parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') - parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') - parser.add_argument('--regions', action='store_true', help='List Regions as JSON') - parser.add_argument('--images', action='store_true', help='List Images as JSON') - parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') - parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') - parser.add_argument('--domains', action='store_true', help='List Domains as JSON') - - parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') - - parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') - parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') - parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') - parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, - help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') - - parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') - parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') - - self.args = parser.parse_args() - - if self.args.api_token: - self.api_token = self.args.api_token - - # Make --list default if none of the other commands are specified - if (not self.args.droplets and not self.args.regions and - not self.args.images and not self.args.sizes and - not self.args.ssh_keys and not 
self.args.domains and - not self.args.all and not self.args.host): - self.args.list = True - - ########################################################################### - # Data Management - ########################################################################### - - def load_from_digital_ocean(self, resource=None): - '''Get JSON from DigitalOcean API''' - if self.args.force_cache and os.path.isfile(self.cache_filename): - return - # We always get fresh droplets - if self.is_cache_valid() and not (resource == 'droplets' or resource is None): - return - if self.args.refresh_cache: - resource = None - - if resource == 'droplets' or resource is None: - self.data['droplets'] = self.manager.all_active_droplets() - self.cache_refreshed = True - if resource == 'regions' or resource is None: - self.data['regions'] = self.manager.all_regions() - self.cache_refreshed = True - if resource == 'images' or resource is None: - self.data['images'] = self.manager.all_images(filter=None) - self.cache_refreshed = True - if resource == 'sizes' or resource is None: - self.data['sizes'] = self.manager.sizes() - self.cache_refreshed = True - if resource == 'ssh_keys' or resource is None: - self.data['ssh_keys'] = self.manager.all_ssh_keys() - self.cache_refreshed = True - if resource == 'domains' or resource is None: - self.data['domains'] = self.manager.all_domains() - self.cache_refreshed = True - - def build_inventory(self): - '''Build Ansible inventory of droplets''' - self.inventory = { - 'all': { - 'hosts': [], - 'vars': self.group_variables - }, - '_meta': {'hostvars': {}} - } - - # add all droplets by id and name - for droplet in self.data['droplets']: - # when using private_networking, the API reports the private one in "ip_address". - if 'private_networking' in droplet['features'] and not self.use_private_network: - for net in droplet['networks']['v4']: - if net['type'] == 'public': - dest = net['ip_address'] - else: - continue - else: - dest = droplet['ip_address'] - - self.inventory['all']['hosts'].append(dest) - - self.inventory[droplet['id']] = [dest] - self.inventory[droplet['name']] = [dest] - - # groups that are always present - for group in ('region_' + droplet['region']['slug'], - 'image_' + str(droplet['image']['id']), - 'size_' + droplet['size']['slug'], - 'distro_' + self.to_safe(droplet['image']['distribution']), - 'status_' + droplet['status']): - if group not in self.inventory: - self.inventory[group] = {'hosts': [], 'vars': {}} - self.inventory[group]['hosts'].append(dest) - - # groups that are not always present - for group in (droplet['image']['slug'], - droplet['image']['name']): - if group: - image = 'image_' + self.to_safe(group) - if image not in self.inventory: - self.inventory[image] = {'hosts': [], 'vars': {}} - self.inventory[image]['hosts'].append(dest) - - if droplet['tags']: - for tag in droplet['tags']: - if tag not in self.inventory: - self.inventory[tag] = {'hosts': [], 'vars': {}} - self.inventory[tag]['hosts'].append(dest) - - # hostvars - info = self.do_namespace(droplet) - self.inventory['_meta']['hostvars'][dest] = info - - def load_droplet_variables_for_host(self): - '''Generate a JSON response to a --host call''' - host = int(self.args.host) - droplet = self.manager.show_droplet(host) - info = self.do_namespace(droplet) - return {'droplet': info} - - ########################################################################### - # Cache Management - ########################################################################### - - def is_cache_valid(self): - ''' 
Determines whether the cache file has expired or is still valid ''' - if os.path.isfile(self.cache_filename): - mod_time = os.path.getmtime(self.cache_filename) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - return False - - def load_from_cache(self): - ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' - try: - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() - data = json.loads(json_data) - except IOError: - data = {'data': {}, 'inventory': {}} - - self.data = data['data'] - self.inventory = data['inventory'] - - def write_to_cache(self): - ''' Writes data in JSON format to a file ''' - data = {'data': self.data, 'inventory': self.inventory} - json_data = json.dumps(data, sort_keys=True, indent=2) - - cache = open(self.cache_filename, 'w') - cache.write(json_data) - cache.close() - - ########################################################################### - # Utilities - ########################################################################### - - def push(self, my_dict, key, element): - ''' Pushes an element onto an array that may not have been defined in the dict ''' - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub("[^A-Za-z0-9\-\.]", "_", word) - - def do_namespace(self, data): - ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' - info = {} - for k, v in data.items(): - info['do_' + k] = v - return info - - -########################################################################### -# Run the script -DigitalOceanInventory() diff --git a/contrib/testnets/remote/ansible/inventory/ec2.ini b/contrib/testnets/remote/ansible/inventory/ec2.ini deleted file mode 100644 index e11a69cc..00000000 --- a/contrib/testnets/remote/ansible/inventory/ec2.ini +++ /dev/null @@ -1,209 +0,0 @@ -# Ansible EC2 external inventory script settings -# - -[ec2] - -# to talk to a private eucalyptus instance uncomment these lines -# and edit eucalyptus_host to be the host name of your cloud controller -#eucalyptus = True -#eucalyptus_host = clc.cloud.domain.org - -# AWS regions to make calls to. Set this to 'all' to make requests to all regions -# in AWS and merge the results together. Alternatively, set this to a comma -# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not -# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or -# AWS_DEFAULT_REGION environment variable will be read to determine the region. -regions = all -regions_exclude = us-gov-west-1, cn-north-1 - -# When generating inventory, Ansible needs to know how to address a server. -# Each EC2 instance has a lot of variables associated with it. Here is the list: -# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance -# Below are 2 variables that are used as the address of a server: -# - destination_variable -# - vpc_destination_variable - -# This is the normal destination variable to use. If you are running Ansible -# from outside EC2, then 'public_dns_name' makes the most sense. If you are -# running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'.
The key of an EC2 tag -# may optionally be used; however the boto instance variables hold precedence -# in the event of a collision. -destination_variable = public_dns_name - -# This allows you to override the inventory_name with an ec2 variable, instead -# of using the destination_variable above. Addressing (aka ansible_ssh_host) -# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. -#hostname_variable = tag_Name - -# For servers inside a VPC, using DNS names may not make sense. When an instance -# has 'subnet_id' set, this variable is used. If the subnet is public, setting -# this to 'ip_address' will return the public IP address. For instances in a -# private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from within EC2. The key of an EC2 tag may optionally be used; however -# the boto instance variables hold precedence in the event of a collision. -# WARNING: instances that are in a private VPC, _without_ a public IP address, -# will not be listed in the inventory until you set: -# vpc_destination_variable = private_ip_address -vpc_destination_variable = ip_address - -# The following two settings allow flexible ansible host naming based on a -# python format string and a comma-separated list of ec2 tags. Note that: -# -# 1) If the tags referenced are not present for some instances, empty strings -# will be substituted in the format string. -# 2) This overrides both destination_variable and vpc_destination_variable. -# -#destination_format = {0}.{1}.example.com -#destination_format_tags = Name,environment - -# To tag instances on EC2 with the resource records that point to them from -# Route53, set 'route53' to True. -route53 = False - -# To use Route53 records as the inventory hostnames, uncomment and set -# to equal the domain name you wish to use. You must also have 'route53' (above) -# set to True. -# route53_hostnames = .example.com - -# To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False - -# To exclude ElastiCache instances from the inventory, uncomment and set to False. -#elasticache = False - -# Additionally, you can specify the list of zones to exclude from lookups in -# 'route53_excluded_zones' as a comma-separated list. -# route53_excluded_zones = samplezone1.com, samplezone2.com - -# By default, only EC2 instances in the 'running' state are returned. Set -# 'all_instances' to True to return all instances regardless of state. -all_instances = False - -# By default, only EC2 instances in the 'running' state are returned. Specify -# EC2 instance states to return as a comma-separated list. This -# option is overridden when 'all_instances' is True. -# instance_states = pending, running, shutting-down, terminated, stopping, stopped - -# By default, only RDS instances in the 'available' state are returned. Set -# 'all_rds_instances' to True to return all RDS instances regardless of state. -all_rds_instances = False - -# Include RDS cluster information (Aurora etc.) -include_rds_clusters = False - -# By default, only ElastiCache clusters and nodes in the 'available' state -# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes' -# to True to return all ElastiCache clusters and nodes, regardless of state. -# -# Note that all_elasticache_nodes only applies to listed clusters. That means -# if you set all_elasticache_clusters to False, no node will be returned from -# unavailable clusters, regardless of the state and to what you set for -# all_elasticache_nodes.
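Returning to the destination settings above: `destination_variable`, `vpc_destination_variable`, and `destination_format` interact in a fixed order when the script picks the address each instance is inventoried under. A simplified sketch of that resolution (the helper is illustrative; attribute names follow boto's instance object):

```python
# Simplified resolution order for an instance's inventory address:
# destination_format (tag-based) wins, then the VPC variable for
# instances with a subnet_id, then the normal destination variable.
# pick_address is a hypothetical helper, not part of ec2.py itself.
def pick_address(instance, dest_var="public_dns_name",
                 vpc_dest_var="ip_address",
                 dest_format=None, dest_format_tags=None):
    tags = getattr(instance, "tags", {}) or {}
    if dest_format and dest_format_tags:
        # e.g. dest_format="{0}.example.com", dest_format_tags=["Name"]
        return dest_format.format(*[tags.get(t, "") for t in dest_format_tags])
    if getattr(instance, "subnet_id", None):
        return getattr(instance, vpc_dest_var, None) or tags.get(vpc_dest_var)
    return getattr(instance, dest_var, None) or tags.get(dest_var)
```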
-all_elasticache_replication_groups = False -all_elasticache_clusters = False -all_elasticache_nodes = False - -# API calls to EC2 are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-ec2.cache -# - ansible-ec2.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# Organize groups into a nested hierarchy instead of a flat namespace. -nested_groups = False - -# Replace - tags when creating groups to avoid issues with ansible -replace_dash_in_groups = True - -# If set to true, any tag of the form "a,b,c" is expanded into a list -# and the results are used to create additional tag_* inventory groups. -expand_csv_tags = False - -# The EC2 inventory output can become very large. To manage its size, -# configure which groups should be created. -group_by_instance_id = True -group_by_region = True -group_by_availability_zone = True -group_by_aws_account = False -group_by_ami_id = True -group_by_instance_type = True -group_by_instance_state = False -group_by_key_pair = True -group_by_vpc_id = True -group_by_security_group = True -group_by_tag_keys = True -group_by_tag_none = True -group_by_route53_names = True -group_by_rds_engine = True -group_by_rds_parameter_group = True -group_by_elasticache_engine = True -group_by_elasticache_cluster = True -group_by_elasticache_parameter_group = True -group_by_elasticache_replication_group = True - -# If you only want to include hosts that match a certain regular expression -# pattern_include = staging-* - -# If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = staging-* - -# Instance filters can be used to control which instances are retrieved for -# inventory. For the full list of possible filters, please read the EC2 API -# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters -# Filters are key/value pairs separated by '='; to list multiple filters, use -# a list separated by commas. See examples below. - -# If you want to apply multiple filters simultaneously, set stack_filters to -# True. Default behaviour is to combine the results of all filters. Stacking -# allows the use of multiple conditions to filter down, for example by -# environment and type of host. -stack_filters = False - -# Retrieve only instances with (key=value) env=staging tag -# instance_filters = tag:env=staging - -# Retrieve only instances with role=webservers OR role=dbservers tag -# instance_filters = tag:role=webservers,tag:role=dbservers - -# Retrieve only t1.micro instances OR instances with tag env=staging -# instance_filters = instance-type=t1.micro,tag:env=staging - -# You can use wildcards in filter values also. Below will list instances whose -# tag Name value matches webservers1* -# (ex. webservers15, webservers1a, webservers123 etc) -# instance_filters = tag:Name=webservers1* - -# An IAM role can be assumed, so all requests are run as that role.
-# This can be useful for connecting across different accounts, or to limit user -# access -# iam_role = role-arn - -# A boto configuration profile may be used to separate out credentials -# see http://boto.readthedocs.org/en/latest/boto_config_tut.html -# boto_profile = some-boto-profile-name - - -[credentials] - -# The AWS credentials can optionally be specified here. Credentials specified -# here are ignored if the environment variable AWS_ACCESS_KEY_ID or -# AWS_PROFILE is set, or if the boto_profile property above is set. -# -# Supplying AWS credentials here is not recommended, as it introduces -# non-trivial security concerns. When going down this route, please make sure -# to set access permissions for this file correctly, e.g. handle it the same -# way as you would a private SSH key. -# -# Unlike the boto and AWS configure files, this section does not support -# profiles. -# -# aws_access_key_id = AXXXXXXXXXXXXXX -# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX -# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/contrib/testnets/remote/ansible/inventory/ec2.py b/contrib/testnets/remote/ansible/inventory/ec2.py deleted file mode 100755 index 9614c5fe..00000000 --- a/contrib/testnets/remote/ansible/inventory/ec2.py +++ /dev/null @@ -1,1595 +0,0 @@ -#!/usr/bin/env python - -''' -EC2 external inventory script -================================= - -Generates inventory that Ansible can understand by making API requests to -AWS EC2 using the Boto library. - -NOTE: This script assumes Ansible is being executed where the environment -variables needed for Boto have already been set: - export AWS_ACCESS_KEY_ID='AK123' - export AWS_SECRET_ACCESS_KEY='abc123' - -An optional region environment variable is read if region is 'auto'. - -This script also assumes there is an ec2.ini file alongside it. To specify a -different path to ec2.ini, define the EC2_INI_PATH environment variable: - - export EC2_INI_PATH=/path/to/my_ec2.ini - -If you're using eucalyptus you need to set the above variables and -you need to define: - - export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus - -If you're using boto profiles (requires boto>=2.24.0) you can choose a profile -using the --boto-profile command line argument (e.g.
ec2.py --boto-profile prod) or using -the AWS_PROFILE variable: - - AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml - -For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html - -When run against a specific host, this script returns the following variables: - - ec2_ami_launch_index - - ec2_architecture - - ec2_association - - ec2_attachTime - - ec2_attachment - - ec2_attachmentId - - ec2_block_devices - - ec2_client_token - - ec2_deleteOnTermination - - ec2_description - - ec2_deviceIndex - - ec2_dns_name - - ec2_eventsSet - - ec2_group_name - - ec2_hypervisor - - ec2_id - - ec2_image_id - - ec2_instanceState - - ec2_instance_type - - ec2_ipOwnerId - - ec2_ip_address - - ec2_item - - ec2_kernel - - ec2_key_name - - ec2_launch_time - - ec2_monitored - - ec2_monitoring - - ec2_networkInterfaceId - - ec2_ownerId - - ec2_persistent - - ec2_placement - - ec2_platform - - ec2_previous_state - - ec2_private_dns_name - - ec2_private_ip_address - - ec2_publicIp - - ec2_public_dns_name - - ec2_ramdisk - - ec2_reason - - ec2_region - - ec2_requester_id - - ec2_root_device_name - - ec2_root_device_type - - ec2_security_group_ids - - ec2_security_group_names - - ec2_shutdown_state - - ec2_sourceDestCheck - - ec2_spot_instance_request_id - - ec2_state - - ec2_state_code - - ec2_state_reason - - ec2_status - - ec2_subnet_id - - ec2_tenancy - - ec2_virtualization_type - - ec2_vpc_id - -These variables are pulled out of a boto.ec2.instance object. There is a lack of -consistency with variable spellings (camelCase and underscores) since this -just loops through all variables the object exposes. It is preferred to use the -ones with underscores when multiple exist. - -In addition, if an instance has AWS Tags associated with it, each tag is a new -variable named: - - ec2_tag_[Key] = [Value] - -Security groups are comma-separated in 'ec2_security_group_ids' and -'ec2_security_group_names'. -''' - -# (c) 2012, Peter Sankauskas -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
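As the docstring notes, each AWS tag surfaces as an `ec2_tag_[Key]` hostvar, and group names derived from tags must be sanitized before Ansible can use them. A small sketch of both conventions; the `to_safe` regex mirrors the one these scripts use, while the rest is illustrative:

```python
# Sketch of the tag conventions described above: every AWS tag becomes
# an ec2_tag_<Key> hostvar, and group names are sanitized the way the
# scripts' to_safe() does (anything outside [A-Za-z0-9-._] becomes '_').
import re


def to_safe(word):
    return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)


def tag_hostvars(tags):
    return {"ec2_tag_" + key: value for key, value in tags.items()}


# Example: the tag {'Name': 'web server 1'} yields the hostvar
# ec2_tag_Name and the group name 'tag_Name_web_server_1'.
group = to_safe("tag_" + "Name" + "=" + "web server 1")
```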
- -###################################################################### - -import sys -import os -import argparse -import re -from time import time -import boto -from boto import ec2 -from boto import rds -from boto import elasticache -from boto import route53 -from boto import sts -import six - -from ansible.module_utils import ec2 as ec2_utils - -HAS_BOTO3 = False -try: - import boto3 - HAS_BOTO3 = True -except ImportError: - pass - -from six.moves import configparser -from collections import defaultdict - -try: - import json -except ImportError: - import simplejson as json - - -class Ec2Inventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - self.aws_account_id = None - - # Index of hostname (address) to instance ID - self.index = {} - - # Boto profile to use (if any) - self.boto_profile = None - - # AWS credentials. - self.credentials = {} - - # Read settings and parse CLI arguments - self.parse_cli_args() - self.read_settings() - - # Make sure that profile_name is not passed at all if not set - # as pre 2.24 boto will fall over otherwise - if self.boto_profile: - if not hasattr(boto.ec2.EC2Connection, 'profile_name'): - self.fail_with_error("boto version must be >= 2.24 to use profile") - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines whether the cache files have expired or are still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the ec2.ini file ''' - - scriptbasename = __file__ - scriptbasename = os.path.basename(scriptbasename) - scriptbasename = scriptbasename.replace('.py', '') - - defaults = { - 'ec2': { - 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) - } - } - - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) - ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) - config.read(ec2_ini_path) - - # is eucalyptus?
- self.eucalyptus_host = None - self.eucalyptus = False - if config.has_option('ec2', 'eucalyptus'): - self.eucalyptus = config.getboolean('ec2', 'eucalyptus') - if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): - self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') - - # Regions - self.regions = [] - configRegions = config.get('ec2', 'regions') - if (configRegions == 'all'): - if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name) - else: - configRegions_exclude = config.get('ec2', 'regions_exclude') - for regionInfo in ec2.regions(): - if regionInfo.name not in configRegions_exclude: - self.regions.append(regionInfo.name) - else: - self.regions = configRegions.split(",") - if 'auto' in self.regions: - env_region = os.environ.get('AWS_REGION') - if env_region is None: - env_region = os.environ.get('AWS_DEFAULT_REGION') - self.regions = [env_region] - - # Destination addresses - self.destination_variable = config.get('ec2', 'destination_variable') - self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - - if config.has_option('ec2', 'hostname_variable'): - self.hostname_variable = config.get('ec2', 'hostname_variable') - else: - self.hostname_variable = None - - if config.has_option('ec2', 'destination_format') and \ - config.has_option('ec2', 'destination_format_tags'): - self.destination_format = config.get('ec2', 'destination_format') - self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') - else: - self.destination_format = None - self.destination_format_tags = None - - # Route53 - self.route53_enabled = config.getboolean('ec2', 'route53') - if config.has_option('ec2', 'route53_hostnames'): - self.route53_hostnames = config.get('ec2', 'route53_hostnames') - else: - self.route53_hostnames = None - self.route53_excluded_zones = [] - if config.has_option('ec2', 'route53_excluded_zones'): - self.route53_excluded_zones.extend( - config.get('ec2', 'route53_excluded_zones', '').split(',')) - - # Include RDS instances? - self.rds_enabled = True - if config.has_option('ec2', 'rds'): - self.rds_enabled = config.getboolean('ec2', 'rds') - - # Include RDS cluster instances? - if config.has_option('ec2', 'include_rds_clusters'): - self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') - else: - self.include_rds_clusters = False - - # Include ElastiCache instances? - self.elasticache_enabled = True - if config.has_option('ec2', 'elasticache'): - self.elasticache_enabled = config.getboolean('ec2', 'elasticache') - - # Return all EC2 instances? - if config.has_option('ec2', 'all_instances'): - self.all_instances = config.getboolean('ec2', 'all_instances') - else: - self.all_instances = False - - # Instance states to be gathered in inventory. Default is 'running'. - # Setting 'all_instances' to 'yes' overrides this option. - ec2_valid_instance_states = [ - 'pending', - 'running', - 'shutting-down', - 'terminated', - 'stopping', - 'stopped' - ] - self.ec2_instance_states = [] - if self.all_instances: - self.ec2_instance_states = ec2_valid_instance_states - elif config.has_option('ec2', 'instance_states'): - for instance_state in config.get('ec2', 'instance_states').split(','): - instance_state = instance_state.strip() - if instance_state not in ec2_valid_instance_states: - continue - self.ec2_instance_states.append(instance_state) - else: - self.ec2_instance_states = ['running'] - - # Return all RDS instances?
(if RDS is enabled) - if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: - self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') - else: - self.all_rds_instances = False - - # Return all ElastiCache replication groups? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: - self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') - else: - self.all_elasticache_replication_groups = False - - # Return all ElastiCache clusters? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: - self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') - else: - self.all_elasticache_clusters = False - - # Return all ElastiCache nodes? (if ElastiCache is enabled) - if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: - self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') - else: - self.all_elasticache_nodes = False - - # boto configuration profile (prefer CLI argument then environment variables then config file) - self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') - if config.has_option('ec2', 'boto_profile') and not self.boto_profile: - self.boto_profile = config.get('ec2', 'boto_profile') - - # AWS credentials (prefer environment variables) - if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or - os.environ.get('AWS_PROFILE')): - if config.has_option('credentials', 'aws_access_key_id'): - aws_access_key_id = config.get('credentials', 'aws_access_key_id') - else: - aws_access_key_id = None - if config.has_option('credentials', 'aws_secret_access_key'): - aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') - else: - aws_secret_access_key = None - if config.has_option('credentials', 'aws_security_token'): - aws_security_token = config.get('credentials', 'aws_security_token') - else: - aws_security_token = None - if aws_access_key_id: - self.credentials = { - 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key - } - if aws_security_token: - self.credentials['security_token'] = aws_security_token - - # Cache related - cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) - if self.boto_profile: - cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - cache_name = 'ansible-ec2' - cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) - if cache_id: - cache_name = '%s-%s' % (cache_name, cache_id) - self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) - self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) - self.cache_max_age = config.getint('ec2', 'cache_max_age') - - if config.has_option('ec2', 'expand_csv_tags'): - self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') - else: - self.expand_csv_tags = False - - # Configure nested groups instead of flat namespace. 
- if config.has_option('ec2', 'nested_groups'): - self.nested_groups = config.getboolean('ec2', 'nested_groups') - else: - self.nested_groups = False - - # Replace dash or not in group names - if config.has_option('ec2', 'replace_dash_in_groups'): - self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') - else: - self.replace_dash_in_groups = True - - # IAM role to assume for connection - if config.has_option('ec2', 'iam_role'): - self.iam_role = config.get('ec2', 'iam_role') - else: - self.iam_role = None - - # Configure which groups should be created. - group_by_options = [ - 'group_by_instance_id', - 'group_by_region', - 'group_by_availability_zone', - 'group_by_ami_id', - 'group_by_instance_type', - 'group_by_instance_state', - 'group_by_key_pair', - 'group_by_vpc_id', - 'group_by_security_group', - 'group_by_tag_keys', - 'group_by_tag_none', - 'group_by_route53_names', - 'group_by_rds_engine', - 'group_by_rds_parameter_group', - 'group_by_elasticache_engine', - 'group_by_elasticache_cluster', - 'group_by_elasticache_parameter_group', - 'group_by_elasticache_replication_group', - 'group_by_aws_account', - ] - for option in group_by_options: - if config.has_option('ec2', option): - setattr(self, option, config.getboolean('ec2', option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? - try: - pattern_include = config.get('ec2', 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except configparser.NoOptionError: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? - try: - pattern_exclude = config.get('ec2', 'pattern_exclude') - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except configparser.NoOptionError: - self.pattern_exclude = None - - # Do we want to stack multiple filters? - if config.has_option('ec2', 'stack_filters'): - self.stack_filters = config.getboolean('ec2', 'stack_filters') - else: - self.stack_filters = False - - # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
- self.ec2_instance_filters = defaultdict(list) - if config.has_option('ec2', 'instance_filters'): - - filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] - - for instance_filter in filters: - instance_filter = instance_filter.strip() - if not instance_filter or '=' not in instance_filter: - continue - filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] - if not filter_key: - continue - self.ec2_instance_filters[filter_key].append(filter_value) - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', - help='Use boto profile for connections to EC2') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - if self.route53_enabled: - self.get_route53_records() - - for region in self.regions: - self.get_instances_by_region(region) - if self.rds_enabled: - self.get_rds_instances_by_region(region) - if self.elasticache_enabled: - self.get_elasticache_clusters_by_region(region) - self.get_elasticache_replication_groups_by_region(region) - if self.include_rds_clusters: - self.include_rds_clusters_by_region(region) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def connect(self, region): - ''' create connection to api server''' - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) - conn.APIVersion = '2010-08-31' - else: - conn = self.connect_to_aws(ec2, region) - return conn - - def boto_fix_security_token_in_profile(self, connect_args): - ''' monkey patch for boto issue boto/boto#2100 ''' - profile = 'profile ' + self.boto_profile - if boto.config.has_option(profile, 'aws_security_token'): - connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') - return connect_args - - def connect_to_aws(self, module, region): - connect_args = self.credentials - - # only pass the profile name if it's set (as it is not supported by older boto versions) - if self.boto_profile: - connect_args['profile_name'] = self.boto_profile - self.boto_fix_security_token_in_profile(connect_args) - - if self.iam_role: - sts_conn = sts.connect_to_region(region, **connect_args) - role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') - connect_args['aws_access_key_id'] = role.credentials.access_key - connect_args['aws_secret_access_key'] = role.credentials.secret_key - connect_args['security_token'] = role.credentials.session_token - - conn = module.connect_to_region(region, **connect_args) - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - return conn - - def get_instances_by_region(self, region): - ''' Makes an AWS EC2 API call to the list of instances in a particular - region ''' - - try: - conn = self.connect(region) - reservations = [] - if self.ec2_instance_filters: - if self.stack_filters: - filters_dict = {} - for filter_key, filter_values in self.ec2_instance_filters.items(): - filters_dict[filter_key] = filter_values - reservations.extend(conn.get_all_instances(filters=filters_dict)) - else: - for filter_key, filter_values in self.ec2_instance_filters.items(): - reservations.extend(conn.get_all_instances(filters={filter_key: filter_values})) - else: - reservations = conn.get_all_instances() - - # Pull the tags back in a second step - # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not - # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` - instance_ids = [] - for reservation in reservations: - instance_ids.extend([instance.id for instance in reservation.instances]) - - max_filter_value = 199 - tags = [] - for i in range(0, len(instance_ids), max_filter_value): - tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]})) - - tags_by_instance_id = defaultdict(dict) - for tag in tags: - tags_by_instance_id[tag.res_id][tag.name] = tag.value - - if (not self.aws_account_id) and reservations: - self.aws_account_id = reservations[0].owner_id - - for reservation in reservations: - for instance in reservation.instances: - instance.tags = tags_by_instance_id[instance.id] - self.add_instance(instance, region) - - except boto.exception.BotoServerError as e: - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - else: - backend = 'Eucalyptus' if self.eucalyptus else 'AWS' - error = "Error connecting to %s backend.\n%s" % (backend, e.message) - self.fail_with_error(error, 'getting EC2 instances') - - def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular - region ''' - - if not HAS_BOTO3: - self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", - "getting RDS instances") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) - db_instances = client.describe_db_instances() - - try: - conn = self.connect_to_aws(rds, region) - if conn: - marker = None - while True: - instances = conn.get_all_dbinstances(marker=marker) - marker = instances.marker - for index, instance in enumerate(instances): - # Add tags to instances. 
- instance.arn = db_instances['DBInstances'][index]['DBInstanceArn'] - tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList'] - instance.tags = {} - for tag in tags: - instance.tags[tag['Key']] = tag['Value'] - - self.add_rds_instance(instance, region) - if not marker: - break - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS RDS is down:\n%s" % e.message - self.fail_with_error(error, 'getting RDS instances') - - def include_rds_clusters_by_region(self, region): - if not HAS_BOTO3: - self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", - "getting RDS clusters") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) - - marker, clusters = '', [] - while marker is not None: - resp = client.describe_db_clusters(Marker=marker) - clusters.extend(resp["DBClusters"]) - marker = resp.get('Marker', None) - - account_id = boto.connect_iam().get_user().arn.split(':')[4] - c_dict = {} - for c in clusters: - # remove these datetime objects as there is no serialisation to json - # currently in place and we don't need the data yet - if 'EarliestRestorableTime' in c: - del c['EarliestRestorableTime'] - if 'LatestRestorableTime' in c: - del c['LatestRestorableTime'] - - if self.ec2_instance_filters == {}: - matches_filter = True - else: - matches_filter = False - - try: - # arn:aws:rds:<region>:<account number>:<resourcetype>:<name> - tags = client.list_tags_for_resource( - ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) - c['Tags'] = tags['TagList'] - - if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.items(): - # get AWS tag key e.g. tag:env will be 'env' - tag_name = filter_key.split(":", 1)[1] - # Filter values is a list (if you put multiple values for the same tag name) - matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) - - if matches_filter: - # it matches a filter, so stop looking for further matches - break - - except Exception as e: - if e.message.find('DBInstanceNotFound') >= 0: - # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster. - # Ignore errors when trying to find tags for these - pass - - # ignore empty clusters caused by AWS bug - if len(c['DBClusterMembers']) == 0: - continue - elif matches_filter: - c_dict[c['DBClusterIdentifier']] = c - - self.inventory['db_clusters'] = c_dict - - def get_elasticache_clusters_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache clusters (with - nodes' info) in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method; - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...)
- try: - conn = self.connect_to_aws(elasticache, region) - if conn: - # show_cache_node_info = True - # because we also want nodes' information - response = conn.describe_cache_clusters(None, None, None, True) - - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - try: - # Boto also doesn't provide wrapper classes to CacheClusters or - # CacheNodes. Because of that we can't make use of the get_list - # method in the AWSQueryConnection. Let's do the work manually - clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] - - except KeyError as e: - error = "ElastiCache query to AWS failed (unexpected format)." - self.fail_with_error(error, 'getting ElastiCache clusters') - - for cluster in clusters: - self.add_elasticache_cluster(cluster, region) - - def get_elasticache_replication_groups_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache replication groups - in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method; - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...) - try: - conn = self.connect_to_aws(elasticache, region) - if conn: - response = conn.describe_replication_groups() - - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - try: - # Boto also doesn't provide wrapper classes to ReplicationGroups - # Because of that we can't make use of the get_list method in the - # AWSQueryConnection. Let's do the work manually - replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] - - except KeyError as e: - error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters') - - for replication_group in replication_groups: - self.add_elasticache_replication_group(replication_group, region) - - def get_auth_error_message(self): - ''' create an informative error message if there is an issue authenticating''' - errors = ["Authentication error retrieving ec2 inventory."] - if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: - errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') - else: - errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') - - boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] - boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) - if len(boto_config_found) > 0: - errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) - else: - errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) - - return '\n'.join(errors) - - def fail_with_error(self, err_msg, err_operation=None): - '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def get_instance(self, region, instance_id): - conn = self.connect(region) - - reservations = conn.get_all_instances([instance_id]) - for reservation in reservations: - for instance in reservation.instances: - return instance - - def add_instance(self, instance, region): - ''' Adds an instance to the inventory and index, as long as it is - addressable ''' - - # Only return instances with desired instance states - if instance.state not in self.ec2_instance_states: - return - - # Select the best destination address - if self.destination_format and self.destination_format_tags: - dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags]) - elif instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) - else: - dest = getattr(instance, self.destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.destination_variable, None) - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # set the hostname from route53 - if self.route53_enabled and self.route53_hostnames: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - if name.endswith(self.route53_hostnames): - hostname = name - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - # to_safe strips hostname characters like dots, so don't strip route53 hostnames - elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): - hostname = hostname.lower() - else: - hostname = self.to_safe(hostname).lower() - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(hostname): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(hostname): - return - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.placement) - self.push_group(self.inventory, 'zones', instance.placement) - - # Inventory: Group by Amazon Machine Image (AMI) ID - if self.group_by_ami_id: - ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'images', ami_id) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by instance state - if self.group_by_instance_state: - state_name = self.to_safe('instance_state_' + instance.state) - self.push(self.inventory, state_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'instance_states', state_name) - - # Inventory: Group by key pair - if self.group_by_key_pair and instance.key_name: - key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'keys', key_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - 
self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - - # Inventory: Group by AWS account ID - if self.group_by_aws_account: - self.push(self.inventory, self.aws_account_id, dest) - if self.nested_groups: - self.push_group(self.inventory, 'accounts', self.aws_account_id) - - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.items(): - if self.expand_csv_tags and v and ',' in v: - values = map(lambda x: x.strip(), v.split(',')) - else: - values = [v] - - for v in values: - if v: - key = self.to_safe("tag_" + k + "=" + v) - else: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - if v: - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled and self.group_by_route53_names: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - self.push(self.inventory, name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'route53', name) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest - - def add_rds_instance(self, instance, region): - ''' Adds an RDS instance to the inventory and index, as long as it is - addressable ''' - - # Only want available instances unless all_rds_instances is True - if not self.all_rds_instances and instance.status != 'available': - return - - # Select the best destination address - dest = instance.endpoint[0] - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - - hostname = self.to_safe(hostname).lower() - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.availability_zone) - self.push_group(self.inventory, 'zones', instance.availability_zone) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - - # Inventory: Group by engine - if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) - - # Inventory: Group by parameter group - if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) - - # Global Tag: all RDS instances - self.push(self.inventory, 'rds', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest - - def add_elasticache_cluster(self, cluster, region): - ''' Adds an ElastiCache cluster to the inventory and index, as long as - its nodes are addressable ''' - - # Only want available clusters unless all_elasticache_clusters is True - if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': - return - - # Select the best destination address - if 'ConfigurationEndpoint' in cluster and
cluster['ConfigurationEndpoint']: - # Memcached cluster - dest = cluster['ConfigurationEndpoint']['Address'] - is_redis = False - else: - # Redis single node cluster - # Because all Redis clusters are single nodes, we'll merge the - # info from the cluster with info about the node - dest = cluster['CacheNodes'][0]['Endpoint']['Address'] - is_redis = True - - if not dest: - # Skip clusters we cannot address (e.g. private VPC subnet) - return - - # Add to index - self.index[dest] = [region, cluster['CacheClusterId']] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[cluster['CacheClusterId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) - - # Inventory: Group by region - if self.group_by_region and not is_redis: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone and not is_redis: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type and not is_redis: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group and not is_redis: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error.
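A minimal sketch of the guard described in the comment above, using a hypothetical `cluster` dict in place of a real `describe_cache_clusters` response:

```python
# Hypothetical describe_cache_clusters entry: the 'SecurityGroups' key can be
# present but set to None when the cluster is not placed in a security group.
cluster = {'CacheClusterId': 'demo-001', 'SecurityGroups': None}

# `or []` collapses both the missing-key and the explicit-None cases,
# so the loop body is simply skipped instead of raising a TypeError.
for sg in cluster.get('SecurityGroups') or []:
    print(sg['SecurityGroupId'])
```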
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine and not is_redis: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) - - # Inventory: Group by parameter group - if self.group_by_elasticache_parameter_group: - self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) - - # Inventory: Group by replication group - if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: - self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) - - # Global Tag: all ElastiCache clusters - self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) - - host_info = self.get_host_info_dict_from_describe_dict(cluster) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - # Add the nodes - for node in cluster['CacheNodes']: - self.add_elasticache_node(node, cluster, region) - - def add_elasticache_node(self, node, cluster, region): - ''' Adds an ElastiCache node to the inventory and index, as long as - it is addressable ''' - - # Only want available nodes unless all_elasticache_nodes is True - if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': - return - - # Select the best destination address - dest = node['Endpoint']['Address'] - - if not dest: - # Skip nodes we cannot address (e.g. 
private VPC subnet) - return - - node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) - - # Add to index - self.index[dest] = [region, node_id] - - # Inventory: Group by node ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[node_id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', node_id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error. - if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) - - # Inventory: Group by parameter group (done at cluster level) - - # Inventory: Group by replication group (done at cluster level) - - # Inventory: Group by ElastiCache Cluster - if self.group_by_elasticache_cluster: - self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) - - # Global Tag: all ElastiCache nodes - self.push(self.inventory, 'elasticache_nodes', dest) - - host_info = self.get_host_info_dict_from_describe_dict(node) - - if dest in self.inventory["_meta"]["hostvars"]: - self.inventory["_meta"]["hostvars"][dest].update(host_info) - else: - self.inventory["_meta"]["hostvars"][dest] = host_info - - def add_elasticache_replication_group(self, replication_group, region): - ''' Adds an ElastiCache replication group to the inventory and index ''' - - # Only want available clusters unless all_elasticache_replication_groups is True - if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': - return - - # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis) - if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ - replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: - return - - # Select the best destination address (PrimaryEndpoint) - dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] - - # Add to index - self.index[dest] = [region, replication_group['ReplicationGroupId']] - - # Inventory: Group by ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[replication_group['ReplicationGroupId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone (doesn't apply to replication groups) - - # Inventory: Group by node type (doesn't apply to replication groups) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for replication groups) - - # Inventory: Group by security group (doesn't apply to replication groups) - # Check this value at the cluster level - - # Inventory: Group by engine (replication groups are always Redis) - if self.group_by_elasticache_engine: - self.push(self.inventory, 'elasticache_redis', dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', 'redis') - - # Global Tag: all ElastiCache replication groups - self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) - - host_info = self.get_host_info_dict_from_describe_dict(replication_group) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - def get_route53_records(self): - ''' Get and store the map of resource records to domain names that - point to them. ''' - - if self.boto_profile: - r53_conn = route53.Route53Connection(profile_name=self.boto_profile) - else: - r53_conn = route53.Route53Connection() - all_zones = r53_conn.get_zones() - - route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] - - self.route53_records = {} - - for zone in route53_zones: - rrsets = r53_conn.get_all_rrsets(zone.id) - - for record_set in rrsets: - record_name = record_set.name - - if record_name.endswith('.'): - record_name = record_name[:-1] - - for resource in record_set.resource_records: - self.route53_records.setdefault(resource, set()) - self.route53_records[resource].add(record_name) - - def get_instance_route53_names(self, instance): - ''' Check if an instance is referenced in the records we have from - Route53. If it is, return the list of domain names pointing to said - instance. If nothing points to it, return an empty list.
''' - - instance_attributes = ['public_dns_name', 'private_dns_name', - 'ip_address', 'private_ip_address'] - - name_list = set() - - for attrib in instance_attributes: - try: - value = getattr(instance, attrib) - except AttributeError: - continue - - if value in self.route53_records: - name_list.update(self.route53_records[value]) - - return list(name_list) - - def get_host_info_dict_from_instance(self, instance): - instance_vars = {} - for key in vars(instance): - value = getattr(instance, key) - key = self.to_safe('ec2_' + key) - - # Handle complex types - # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 - if key == 'ec2__state': - instance_vars['ec2_state'] = instance.state or '' - instance_vars['ec2_state_code'] = instance.state_code - elif key == 'ec2__previous_state': - instance_vars['ec2_previous_state'] = instance.previous_state or '' - instance_vars['ec2_previous_state_code'] = instance.previous_state_code - elif isinstance(value, (int, bool)): - instance_vars[key] = value - elif isinstance(value, six.string_types): - instance_vars[key] = value.strip() - elif value is None: - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2__placement': - instance_vars['ec2_placement'] = value.zone - elif key == 'ec2_tags': - for k, v in value.items(): - if self.expand_csv_tags and ',' in v: - v = list(map(lambda x: x.strip(), v.split(','))) - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': - group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) - instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) - elif key == 'ec2_block_device_mapping': - instance_vars["ec2_block_devices"] = {} - for k, v in value.items(): - instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id - else: - pass - # TODO Product codes if someone finds them useful - # print key - # print type(value) - # print value - - instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id - - return instance_vars - - def get_host_info_dict_from_describe_dict(self, describe_dict): - ''' Parses the dictionary returned by the API call into a flat list - of parameters. This method should be used only when 'describe' is - used directly because Boto doesn't provide specific classes. ''' - - # I really don't agree with prefixing everything with 'ec2' - # because EC2, RDS and ElastiCache are different services. - # I'm just following the pattern used until now to not break any - # compatibility. 
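To illustrate the flattening that follows, a rough sketch of what the key conversion produces (the regexes mirror the `uncammelize` helper defined further down; the sample key is just an example):

```python
import re

def uncammelize(key):
    # CamelCase -> snake_case, the same two-step substitution as the helper below
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

# A describe key such as 'CacheClusterId' becomes the host variable
# 'ec2_cache_cluster_id' once the 'ec2_' prefix is applied.
print('ec2_' + uncammelize('CacheClusterId'))  # -> ec2_cache_cluster_id
```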
- - host_info = {} - for key in describe_dict: - value = describe_dict[key] - key = self.to_safe('ec2_' + self.uncammelize(key)) - - # Handle complex types - - # Target: Memcached Cache Clusters - if key == 'ec2_configuration_endpoint' and value: - host_info['ec2_configuration_endpoint_address'] = value['Address'] - host_info['ec2_configuration_endpoint_port'] = value['Port'] - - # Target: Cache Nodes and Redis Cache Clusters (single node) - if key == 'ec2_endpoint' and value: - host_info['ec2_endpoint_address'] = value['Address'] - host_info['ec2_endpoint_port'] = value['Port'] - - # Target: Redis Replication Groups - if key == 'ec2_node_groups' and value: - host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] - host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] - replica_count = 0 - for node in value[0]['NodeGroupMembers']: - if node['CurrentRole'] == 'primary': - host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] - host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] - host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] - elif node['CurrentRole'] == 'replica': - host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] - host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] - host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] - replica_count += 1 - - # Target: Redis Replication Groups - if key == 'ec2_member_clusters' and value: - host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) - - # Target: All Cache Clusters - elif key == 'ec2_cache_parameter_group': - host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) - host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] - host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] - - # Target: Almost everything - elif key == 'ec2_security_groups': - - # Skip if SecurityGroups is None - # (it is possible to have the key defined but no value in it). 
- if value is not None: - sg_ids = [] - for sg in value: - sg_ids.append(sg['SecurityGroupId']) - host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) - - # Target: Everything - # Preserve booleans and integers - elif isinstance(value, (int, bool)): - host_info[key] = value - - # Target: Everything - # Sanitize string values - elif isinstance(value, six.string_types): - host_info[key] = value.strip() - - # Target: Everything - # Replace None by an empty string - elif value is None: - host_info[key] = '' - - else: - # Remove non-processed complex types - pass - - return host_info - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (region, instance_id) = self.index[self.args.host] - - instance = self.get_instance(region, instance_id) - return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict ''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - with open(self.cache_path_cache, 'r') as f: - json_inventory = f.read() - return json_inventory - - def load_index_from_cache(self): - ''' Reads the index from the cache file and sets self.index ''' - - with open(self.cache_path_index, 'rb') as f: - self.index = json.load(f) - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - with open(filename, 'w') as f: - f.write(json_data) - - def uncammelize(self, key): - temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = "[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += "\-" - return re.sub(regex + "]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -if __name__ == '__main__': - # Run the script - Ec2Inventory() diff --git a/contrib/testnets/remote/ansible/logzio.yml b/contrib/testnets/remote/ansible/logzio.yml deleted file mode 100644 index 7ad28193..00000000 --- a/contrib/testnets/remote/ansible/logzio.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -#Note: You need to add the LOGZIO_TOKEN variable with your API key.
Like this: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456 - -- hosts: all - any_errors_fatal: true - gather_facts: no - vars: - - service: gaiad - - JOURNALBEAT_BINARY: "{{lookup('env', 'GOPATH')}}/bin/journalbeat" - roles: - - logzio - diff --git a/contrib/testnets/remote/ansible/remove-datadog-agent.yml b/contrib/testnets/remote/ansible/remove-datadog-agent.yml deleted file mode 100644 index 32679c3b..00000000 --- a/contrib/testnets/remote/ansible/remove-datadog-agent.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - remove-datadog-agent - diff --git a/contrib/testnets/remote/ansible/roles/add-lcd/defaults/main.yml b/contrib/testnets/remote/ansible/roles/add-lcd/defaults/main.yml deleted file mode 100644 index 16a85e0d..00000000 --- a/contrib/testnets/remote/ansible/roles/add-lcd/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -GAIACLI_ADDRESS: tcp://0.0.0.0:1317 - diff --git a/contrib/testnets/remote/ansible/roles/add-lcd/handlers/main.yml b/contrib/testnets/remote/ansible/roles/add-lcd/handlers/main.yml deleted file mode 100644 index 2ce6b83e..00000000 --- a/contrib/testnets/remote/ansible/roles/add-lcd/handlers/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- name: systemctl - systemd: name=gaiacli enabled=yes daemon_reload=yes - -- name: restart gaiacli - service: name=gaiacli state=restarted - - diff --git a/contrib/testnets/remote/ansible/roles/add-lcd/tasks/main.yml b/contrib/testnets/remote/ansible/roles/add-lcd/tasks/main.yml deleted file mode 100644 index d0fbd813..00000000 --- a/contrib/testnets/remote/ansible/roles/add-lcd/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: Copy binary - copy: - src: "{{GAIACLI_BINARY}}" - dest: /usr/bin/gaiacli - mode: 0755 - notify: restart gaiacli - -- name: Copy service - template: - src: gaiacli.service.j2 - dest: /etc/systemd/system/gaiacli.service - notify: systemctl - diff --git a/contrib/testnets/remote/ansible/roles/add-lcd/templates/gaiacli.service.j2 b/contrib/testnets/remote/ansible/roles/add-lcd/templates/gaiacli.service.j2 deleted file mode 100644 index 67cbeaee..00000000 --- a/contrib/testnets/remote/ansible/roles/add-lcd/templates/gaiacli.service.j2 +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=gaiacli -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=gaiad -Group=gaiad -PermissionsStartOnly=true -ExecStart=/usr/bin/gaiacli rest-server --laddr {{GAIACLI_ADDRESS}} -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/contrib/testnets/remote/ansible/roles/clear-config/tasks/main.yml b/contrib/testnets/remote/ansible/roles/clear-config/tasks/main.yml deleted file mode 100644 index 9e853f1d..00000000 --- a/contrib/testnets/remote/ansible/roles/clear-config/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Stop service - service: name=gaiad state=stopped - -- name: Delete files - file: "path={{item}} state=absent" - with_items: - - /usr/bin/gaiad - - /home/gaiad/.gaia diff --git a/contrib/testnets/remote/ansible/roles/extract-config/defaults/main.yml b/contrib/testnets/remote/ansible/roles/extract-config/defaults/main.yml deleted file mode 100644 index a535d201..00000000 --- a/contrib/testnets/remote/ansible/roles/extract-config/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -TESTNET_NAME: remotenet - diff --git a/contrib/testnets/remote/ansible/roles/extract-config/tasks/main.yml 
b/contrib/testnets/remote/ansible/roles/extract-config/tasks/main.yml deleted file mode 100644 index 2ec38e06..00000000 --- a/contrib/testnets/remote/ansible/roles/extract-config/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- name: Fetch genesis.json - fetch: "src=/home/gaiad/.gaia/config/genesis.json dest={{GENESISFILE}} flat=yes" - run_once: yes - become: yes - become_user: gaiad - -- name: Fetch config.toml - fetch: "src=/home/gaiad/.gaia/config/config.toml dest={{CONFIGFILE}} flat=yes" - run_once: yes - become: yes - become_user: gaiad - diff --git a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/50-fs.conf b/contrib/testnets/remote/ansible/roles/increase-openfiles/files/50-fs.conf deleted file mode 100644 index 5193edd2..00000000 --- a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/50-fs.conf +++ /dev/null @@ -1 +0,0 @@ -fs.file-max=262144 diff --git a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/91-nofiles.conf b/contrib/testnets/remote/ansible/roles/increase-openfiles/files/91-nofiles.conf deleted file mode 100644 index 929081c6..00000000 --- a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/91-nofiles.conf +++ /dev/null @@ -1,3 +0,0 @@ -* soft nofile 262144 -* hard nofile 262144 - diff --git a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/limits.conf b/contrib/testnets/remote/ansible/roles/increase-openfiles/files/limits.conf deleted file mode 100644 index d3fcd2e8..00000000 --- a/contrib/testnets/remote/ansible/roles/increase-openfiles/files/limits.conf +++ /dev/null @@ -1,3 +0,0 @@ -[Service] -LimitNOFILE=infinity -LimitMEMLOCK=infinity diff --git a/contrib/testnets/remote/ansible/roles/increase-openfiles/handlers/main.yml b/contrib/testnets/remote/ansible/roles/increase-openfiles/handlers/main.yml deleted file mode 100644 index d4960230..00000000 --- a/contrib/testnets/remote/ansible/roles/increase-openfiles/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: reload systemctl - systemd: name=systemd daemon_reload=yes - diff --git a/contrib/testnets/remote/ansible/roles/increase-openfiles/tasks/main.yml b/contrib/testnets/remote/ansible/roles/increase-openfiles/tasks/main.yml deleted file mode 100644 index 78432f5b..00000000 --- a/contrib/testnets/remote/ansible/roles/increase-openfiles/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# Based on: https://stackoverflow.com/questions/38155108/how-to-increase-limit-for-open-processes-and-files-using-ansible - -- name: Set sysctl File Limits - copy: - src: 50-fs.conf - dest: /etc/sysctl.d - -- name: Set Shell File Limits - copy: - src: 91-nofiles.conf - dest: /etc/security/limits.d - -- name: Set gaia filehandle Limits - copy: - src: limits.conf - dest: "/lib/systemd/system/{{item}}.service.d" - notify: reload systemctl - with_items: - - gaiad - - gaiacli - diff --git a/contrib/testnets/remote/ansible/roles/install-datadog-agent/handlers/main.yml b/contrib/testnets/remote/ansible/roles/install-datadog-agent/handlers/main.yml deleted file mode 100644 index 04f72b74..00000000 --- a/contrib/testnets/remote/ansible/roles/install-datadog-agent/handlers/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: restart datadog-agent - service: name=datadog-agent state=restarted - -- name: restart rsyslog - service: name=rsyslog state=restarted - -- name: restart journald - service: name=systemd-journald state=restarted diff --git a/contrib/testnets/remote/ansible/roles/install-datadog-agent/tasks/main.yml 
b/contrib/testnets/remote/ansible/roles/install-datadog-agent/tasks/main.yml deleted file mode 100644 index 4d5aa187..00000000 --- a/contrib/testnets/remote/ansible/roles/install-datadog-agent/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: Remove old datadog.yaml, if it exists - file: path=/etc/datadog-agent/datadog.yaml state=absent - notify: restart datadog-agent - -- name: Download DataDog agent script - get_url: url=https://raw.githubusercontent.com/DataDog/datadog-agent/master/cmd/agent/install_script.sh dest=/tmp/datadog-agent-install.sh mode=0755 - -- name: Install DataDog agent - command: "/tmp/datadog-agent-install.sh" - environment: - DD_API_KEY: "{{DD_API_KEY}}" - DD_HOST_TAGS: "testnet:{{TESTNET_NAME}},cluster:{{CLUSTER_NAME}}" - diff --git a/contrib/testnets/remote/ansible/roles/logzio/files/journalbeat.service b/contrib/testnets/remote/ansible/roles/logzio/files/journalbeat.service deleted file mode 100644 index 3cb66a45..00000000 --- a/contrib/testnets/remote/ansible/roles/logzio/files/journalbeat.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=journalbeat -#Propagates activation, deactivation and activation failures. -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -ExecStart=/usr/bin/journalbeat -c /etc/journalbeat/journalbeat.yml -path.home /usr/share/journalbeat -path.config /etc/journalbeat -path.data /var/lib/journalbeat -path.logs /var/log/journalbeat - -[Install] -WantedBy=multi-user.target - - diff --git a/contrib/testnets/remote/ansible/roles/logzio/handlers/main.yml b/contrib/testnets/remote/ansible/roles/logzio/handlers/main.yml deleted file mode 100644 index 0b371fc5..00000000 --- a/contrib/testnets/remote/ansible/roles/logzio/handlers/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- name: reload daemon - command: "systemctl daemon-reload" - -- name: restart journalbeat - service: name=journalbeat state=restarted - diff --git a/contrib/testnets/remote/ansible/roles/logzio/tasks/main.yml b/contrib/testnets/remote/ansible/roles/logzio/tasks/main.yml deleted file mode 100644 index ab3976f2..00000000 --- a/contrib/testnets/remote/ansible/roles/logzio/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- - -- name: Copy journalbeat binary - copy: src="{{JOURNALBEAT_BINARY}}" dest=/usr/bin/journalbeat mode=0755 - notify: restart journalbeat - -- name: Create folders - file: "path={{item}} state=directory recurse=yes" - with_items: - - /etc/journalbeat - - /etc/pki/tls/certs - - /usr/share/journalbeat - - /var/log/journalbeat - -- name: Copy journalbeat config - template: src=journalbeat.yml.j2 dest=/etc/journalbeat/journalbeat.yml mode=0600 - notify: restart journalbeat - -- name: Get server certificate for Logz.io - get_url: "url=https://raw.githubusercontent.com/logzio/public-certificates/master/COMODORSADomainValidationSecureServerCA.crt force=yes dest=/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt" - -- name: Copy journalbeat service config - copy: src=journalbeat.service dest=/etc/systemd/system/journalbeat.service - notify: - - reload daemon - - restart journalbeat - diff --git a/contrib/testnets/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 b/contrib/testnets/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 deleted file mode 100644 index af2ac4f1..00000000 --- a/contrib/testnets/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 +++ /dev/null @@ -1,342 +0,0 @@ -#======================== Journalbeat Configuration ============================ -
-journalbeat: - # What position in journald to seek to at start up - # options: cursor, tail, head (defaults to tail) - #seek_position: tail - - # If seek_position is set to cursor and seeking to cursor fails - # fall back to this method. If set to none it will exit - # options: tail, head, none (defaults to tail) - #cursor_seek_fallback: tail - - # Store the cursor of the successfully published events - #write_cursor_state: true - - # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state") - #cursor_state_file: .journalbeat-cursor-state - - # How frequently should we save the cursor to disk (defaults to 5s) - #cursor_flush_period: 5s - - # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue") - #pending_queue.file: .journalbeat-pending-queue - - # How frequently should we save the queue to disk (defaults to 1s). - # Pending queue represents the WAL of events queued to be published - # or being published and waiting for acknowledgement. In case of a - # regular restart of journalbeat all the events not yet acknowledged - # will be flushed to disk during the shutdown. - # In case of disaster most probably journalbeat won't get a chance to shut down - # itself gracefully and this flush period option will serve you as a - # backup creation frequency option. - #pending_queue.flush_period: 1s - - # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message" - # (defaults to false) - #clean_field_names: false - - # All journal entries are strings by default. You can try to convert them to numbers. - # (defaults to false) - #convert_to_numbers: false - - # Store all the fields of the Systemd Journal entry under this field - # Can be almost any string suitable to be a field name of an ElasticSearch document. - # Dots can be used to create nested fields. - # Two exceptions: - # - no repeated dots; - # - no trailing dots, e.g. "journal..field_name." will fail - # (defaults to "" hence stores on the upper level of the event) - #move_metadata_to_field: "" - - # Specific units to monitor. - units: ["{{service}}.service","gaiacli.service"] - - # Specify Journal paths to open. You can pass an array of Systemd Journal paths. - # If you want to open a Journal directory, just pass an array consisting of one element - # representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html - # By default this setting is empty, thus journalbeat will attempt to find all journal files automatically - #journal_paths: ["/var/log/journal"] - - #default_type: journal - -#================================ General ====================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this option is not defined, the hostname is used. -#name: journalbeat - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -tags: ["{{service}}"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -fields: - logzio_codec: plain - token: {{LOGZIO_TOKEN}} - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false.
-fields_under_root: true - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# The internal queue size for bulk events in the processing pipeline. -# Do not modify this value. -#bulk_queue_size: 0 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -#================================ Processors =================================== - -# Processors are used to reduce the number of fields in the exported event or to -# enhance the event with external metadata. This section defines a list of -# processors that are applied one by one and the first one receives the initial -# event: -# -# event -> filter1 -> event1 -> filter2 -> event2 ... -# -# The supported processors are drop_fields, drop_event, include_fields, and -# add_cloud_metadata. -# -# For example, you can use the following processors to keep the fields that -# contain CPU load percentages, but remove the fields that contain CPU ticks -# values: -# -processors: -#- include_fields: -# fields: ["cpu"] -- drop_fields: - fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"] -# -# The following example drops the events that have the HTTP response code 200: -# -#processors: -#- drop_event: -# when: -# equals: -# http.code: 200 -# -# The following example enriches each event with metadata from the cloud -# provider about the host machine. It works on EC2, GCE, and DigitalOcean. -# -#processors: -#- add_cloud_metadata: -# - -#================================ Outputs ====================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#----------------------------- Logstash output --------------------------------- -output.logstash: - # Boolean flag to enable or disable the output module. - enabled: true - - # The Logstash hosts - hosts: ["listener.logz.io:5015"] - - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optionally load balance the events between the Logstash hosts - #loadbalance: true - - # Number of batches to be sent asynchronously to logstash while processing - # new batches. - #pipelining: 0 - - # Optional index name. The default index name is set to the name of the beat - # in all lowercase. - #index: 'beatname' - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. - ssl.enabled: true - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. - ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] - - # Optional SSL configuration options. SSL is off by default.
- # List of root certificates for HTTPS server verifications - ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the Certificate Key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #ssl.curve_types: [] - -#------------------------------- File output ----------------------------------- -#output.file: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Path to the directory where to save the generated files. The option is - # mandatory. - #path: "/tmp/beatname" - - # Name of the generated files. The default is `beatname` and it generates - # files: `beatname`, `beatname.1`, `beatname.2`, etc. - #filename: beatname - - # Maximum size in kilobytes of each file. When this size is reached, and on - # every beatname restart, the files are rotated. The default value is 10240 - # kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, - # the oldest file is deleted and the rest are shifted from last to first. The - # default is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Pretty print json event - #pretty: false - -#================================= Paths ====================================== - -# The home path for the beatname installation. This is the default base path -# for all other path settings and for miscellaneous files that come with the -# distribution (for example, the sample dashboards). -# If not set by a CLI flag or in the configuration file, the default for the -# home path is the location of the binary. -#path.home: - -# The configuration path for the beatname installation. This is the default -# base path for configuration files, including the main YAML configuration file -# and the Elasticsearch template file. If not set by a CLI flag or in the -# configuration file, the default for the configuration path is the home path. -#path.config: ${path.home} - -# The data path for the beatname installation. This is the default base path -# for all the files in which beatname needs to store its data. If not set by a -# CLI flag or in the configuration file, the default for the data path is a data -# subdirectory inside the home path. -#path.data: ${path.home}/data - -# The logs path for a beatname installation. This is the default location for -# the Beat's log files. If not set by a CLI flag or in the configuration file, -# the default for the logs path is a logs subdirectory inside the home path. -#path.logs: ${path.home}/logs - -#============================== Dashboards ===================================== -# These settings control loading the sample dashboards to the Kibana index. Loading -# the dashboards is disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag. -#dashboards.enabled: false - -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. 
For released -# versions, this URL points to the dashboard archive on the artifacts.elastic.co -# website. -#dashboards.url: - -# The directory from where to read the dashboards. It is used instead of the URL -# when it has a value. -#dashboards.directory: - -# The file archive (zip file) from where to read the dashboards. It is used instead -# of the URL when it has a value. -#dashboards.file: - -# If this option is enabled, the snapshot URL is used instead of the default URL. -#dashboards.snapshot: false - -# The URL from where to download the snapshot version of the dashboards. By default -# this has a value which is computed based on the Beat name and version. -#dashboards.snapshot_url: - -# In case the archive contains the dashboards from multiple Beats, this lets you -# select which one to load. You can load all the dashboards in the archive by -# setting this to the empty string. -#dashboards.beat: beatname - -# The name of the Kibana index to use for setting the configuration. Default is ".kibana" -#dashboards.kibana_index: .kibana - -# The Elasticsearch index name. This overwrites the index name defined in the -# dashboards and index pattern. Example: testbeat-* -#dashboards.index: - -#================================ Logging ====================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are by default sent to the file output, -# under all other systems by default to syslog. - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: info - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are "beat", "publish", "service" -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# If enabled, beatname periodically logs its internal metrics that have changed -# in the last period. For each metric that changed, the delta from the value at -# the beginning of the period is logged. Also, the total values for -# all non-zero internal metrics are logged on shutdown. The default is true. -#logging.metrics.enabled: true - -# The period after which to log the internal metrics. The default is 30s. -#logging.metrics.period: 30s - -# Logging to rotating files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/beatname - - # The name of the files where the logs are written to. - #name: beatname - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. Oldest files will be deleted first.
- #keepfiles: 7 diff --git a/contrib/testnets/remote/ansible/roles/remove-datadog-agent/tasks/main.yml b/contrib/testnets/remote/ansible/roles/remove-datadog-agent/tasks/main.yml deleted file mode 100644 index 73b027a2..00000000 --- a/contrib/testnets/remote/ansible/roles/remove-datadog-agent/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- name: Stop datadog service - failed_when: false - service: name=datadog-agent state=stopped - -- name: Uninstall datadog-agent - yum: name=datadog-agent state=absent - -- name: Remove datadog-agent folder - file: path=/etc/datadog-agent state=absent - diff --git a/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiacli b/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiacli deleted file mode 100644 index 8ef3a7e0..00000000 --- a/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiacli +++ /dev/null @@ -1 +0,0 @@ -DAEMON_COREFILE_LIMIT='unlimited' diff --git a/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiad b/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiad deleted file mode 100644 index 8ef3a7e0..00000000 --- a/contrib/testnets/remote/ansible/roles/set-debug/files/sysconfig/gaiad +++ /dev/null @@ -1 +0,0 @@ -DAEMON_COREFILE_LIMIT='unlimited' diff --git a/contrib/testnets/remote/ansible/roles/set-debug/files/sysctl.d/10-procdump b/contrib/testnets/remote/ansible/roles/set-debug/files/sysctl.d/10-procdump deleted file mode 100644 index fbbbe051..00000000 --- a/contrib/testnets/remote/ansible/roles/set-debug/files/sysctl.d/10-procdump +++ /dev/null @@ -1,3 +0,0 @@ -kernel.core_uses_pid = 1 -kernel.core_pattern = /tmp/core-%e-%s-%u-%g-%p-%t -fs.suid_dumpable = 2 diff --git a/contrib/testnets/remote/ansible/roles/set-debug/handlers/main.yaml b/contrib/testnets/remote/ansible/roles/set-debug/handlers/main.yaml deleted file mode 100644 index 743ce09b..00000000 --- a/contrib/testnets/remote/ansible/roles/set-debug/handlers/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -- name: reload sysctl - command: "/sbin/sysctl -p" diff --git a/contrib/testnets/remote/ansible/roles/set-debug/tasks/main.yml b/contrib/testnets/remote/ansible/roles/set-debug/tasks/main.yml deleted file mode 100644 index 7497dabd..00000000 --- a/contrib/testnets/remote/ansible/roles/set-debug/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Based on https://www.cyberciti.biz/tips/linux-core-dumps.html - -- name: Copy sysctl and sysconfig files to enable app and daemon core dumps - copy: src=.
dest=/etc/ - notify: reload sysctl - -- name: Enable debugging for all apps - lineinfile: create=yes line="DAEMON_COREFILE_LIMIT='unlimited'" path=/etc/sysconfig/init regexp=^DAEMON_COREFILE_LIMIT= diff --git a/contrib/testnets/remote/ansible/roles/setup-fullnodes/defaults/main.yml b/contrib/testnets/remote/ansible/roles/setup-fullnodes/defaults/main.yml deleted file mode 100644 index a535d201..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-fullnodes/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -TESTNET_NAME: remotenet - diff --git a/contrib/testnets/remote/ansible/roles/setup-fullnodes/files/gaiad.service b/contrib/testnets/remote/ansible/roles/setup-fullnodes/files/gaiad.service deleted file mode 100644 index 69716656..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-fullnodes/files/gaiad.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=gaiad -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=gaiad -Group=gaiad -PermissionsStartOnly=true -ExecStart=/usr/bin/gaiad start -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/contrib/testnets/remote/ansible/roles/setup-fullnodes/handlers/main.yml b/contrib/testnets/remote/ansible/roles/setup-fullnodes/handlers/main.yml deleted file mode 100644 index 987e2947..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-fullnodes/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: reload systemd - systemd: name=gaiad enabled=yes daemon_reload=yes - diff --git a/contrib/testnets/remote/ansible/roles/setup-fullnodes/tasks/main.yml b/contrib/testnets/remote/ansible/roles/setup-fullnodes/tasks/main.yml deleted file mode 100644 index 264bdd76..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-fullnodes/tasks/main.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- - -- name: Ensure keys folder exists locally - file: path=keys state=directory - connection: local - run_once: true - become: no - -- name: Create gaiad user - user: name=gaiad home=/home/gaiad shell=/bin/bash - -- name: Copy binary - copy: - src: "{{BINARY}}" - dest: /usr/bin - mode: 0755 - -- name: Copy service file - copy: src=gaiad.service dest=/etc/systemd/system/gaiad.service mode=0755 - notify: reload systemd - -- name: Get node ID - command: "cat /etc/nodeid" - changed_when: false - register: nodeid - -- name: gaiad init - command: "/usr/bin/gaiad init --chain-id={{TESTNET_NAME}} --name=fullnode{{nodeid.stdout_lines[0]}}" - become: yes - become_user: gaiad - register: initresult - args: - creates: /home/gaiad/.gaia/config - -- name: Get wallet word seed from result of initial transaction locally - when: initresult["changed"] - shell: "echo '{{initresult.stdout}}' | python -c 'import json,sys ; print json.loads(\"\".join(sys.stdin.readlines()))[\"app_message\"][\"secret\"]'" - changed_when: false - register: walletkey - connection: local - -- name: Write wallet word seed to local files - when: initresult["changed"] - copy: "content={{walletkey.stdout}} dest=keys/node{{nodeid.stdout_lines[0]}}" - become: no - connection: local - -- name: Copy genesis file - copy: - src: "{{GENESISFILE}}" - dest: /home/gaiad/.gaia/config/genesis.json - become: yes - become_user: gaiad - -- name: Copy config.toml file - copy: - src: "{{CONFIGFILE}}" - dest: /home/gaiad/.gaia/config/config.toml - become: yes - become_user: gaiad - diff --git a/contrib/testnets/remote/ansible/roles/setup-journald/handlers/main.yml 
b/contrib/testnets/remote/ansible/roles/setup-journald/handlers/main.yml deleted file mode 100644 index 14f3b337..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-journald/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: restart journald - service: name=systemd-journald state=restarted - diff --git a/contrib/testnets/remote/ansible/roles/setup-journald/tasks/main.yml b/contrib/testnets/remote/ansible/roles/setup-journald/tasks/main.yml deleted file mode 100644 index 130da520..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-journald/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- - -- name: Disable journald rate-limiting - lineinfile: "dest=/etc/systemd/journald.conf regexp={{item.regexp}} line='{{item.line}}'" - with_items: - - { regexp: "^#RateLimitInterval", line: "RateLimitInterval=0s" } - - { regexp: "^#RateLimitBurst", line: "RateLimitBurst=0" } - - { regexp: "^#SystemMaxFileSize", line: "SystemMaxFileSize=100M" } - - { regexp: "^#SystemMaxUse", line: "SystemMaxUse=500M" } - - { regexp: "^#SystemMaxFiles", line: "SystemMaxFiles=10" } - notify: restart journald - -- name: Change logrotate to daily - lineinfile: "dest=/etc/logrotate.conf regexp={{item.regexp}} line='{{item.line}}'" - with_items: - - { regexp: "^weekly", line: "daily" } - - { regexp: "^#compress", line: "compress" } - -- name: Create journal directory for permanent logs - file: path=/var/log/journal state=directory - notify: restart journald - -- name: Set journal folder with systemd-tmpfiles - command: "systemd-tmpfiles --create --prefix /var/log/journal" - notify: restart journald - diff --git a/contrib/testnets/remote/ansible/roles/setup-validators/defaults/main.yml b/contrib/testnets/remote/ansible/roles/setup-validators/defaults/main.yml deleted file mode 100644 index a535d201..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-validators/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -TESTNET_NAME: remotenet - diff --git a/contrib/testnets/remote/ansible/roles/setup-validators/files/gaiad.service b/contrib/testnets/remote/ansible/roles/setup-validators/files/gaiad.service deleted file mode 100644 index 69716656..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-validators/files/gaiad.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=gaiad -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=gaiad -Group=gaiad -PermissionsStartOnly=true -ExecStart=/usr/bin/gaiad start -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/contrib/testnets/remote/ansible/roles/setup-validators/handlers/main.yml b/contrib/testnets/remote/ansible/roles/setup-validators/handlers/main.yml deleted file mode 100644 index 987e2947..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-validators/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: reload systemd - systemd: name=gaiad enabled=yes daemon_reload=yes - diff --git a/contrib/testnets/remote/ansible/roles/setup-validators/tasks/main.yml b/contrib/testnets/remote/ansible/roles/setup-validators/tasks/main.yml deleted file mode 100644 index a50ffa84..00000000 --- a/contrib/testnets/remote/ansible/roles/setup-validators/tasks/main.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- - -- name: Ensure keys folder exists locally - file: path=keys state=directory - connection: local - run_once: true - become: no - -- name: Create gaiad user - user: name=gaiad home=/home/gaiad shell=/bin/bash - -- name: Copy 
binary - copy: - src: "{{BINARY}}" - dest: /usr/bin - mode: 0755 - -- name: Copy service file - copy: src=gaiad.service dest=/etc/systemd/system/gaiad.service mode=0755 - notify: reload systemd - -- name: Get node ID - command: "cat /etc/nodeid" - changed_when: false - register: nodeid - -- name: Create initial transaction - command: "/usr/bin/gaiad init gen-tx --name=node{{nodeid.stdout_lines[0]}} --ip={{inventory_hostname}}" - register: gentxresult - become: yes - become_user: gaiad - args: - creates: /home/gaiad/.gaia/config/gentx - -- name: Get wallet word seed from result of initial transaction locally - when: gentxresult["changed"] - shell: "echo '{{gentxresult.stdout}}' | python -c 'import json,sys ; print json.loads(\"\".join(sys.stdin.readlines()))[\"app_message\"][\"secret\"]'" - changed_when: false - register: walletkey - connection: local - -- name: Write wallet word seed to local files - when: gentxresult["changed"] - copy: "content={{walletkey.stdout}} dest=keys/node{{nodeid.stdout_lines[0]}}" - become: no - connection: local - -- name: Find gentx file - command: "ls /home/gaiad/.gaia/config/gentx" - changed_when: false - register: gentxfile - -- name: Clear local gen-tx list - file: path=files/ state=absent - connection: local - run_once: yes - -- name: Get gen-tx file - fetch: - dest: files/ - src: "/home/gaiad/.gaia/config/gentx/{{gentxfile.stdout_lines[0]}}" - flat: yes - -- name: Compress gathered gen-tx files locally - archive: path=files/ exclude_path=files/gen-tx.tgz dest=files/gen-tx.tgz - run_once: yes - connection: local - -- name: Unpack gen-tx archive - unarchive: src=files/gen-tx.tgz dest=/home/gaiad/.gaia/config/gentx owner=gaiad - -- name: Generate genesis.json - command: "/usr/bin/gaiad init --with-txs --name=node{{nodeid.stdout_lines[0]}} --chain-id={{TESTNET_NAME}}" - become: yes - become_user: gaiad - args: - creates: /home/gaiad/.gaia/config/genesis.json - diff --git a/contrib/testnets/remote/ansible/roles/start/tasks/main.yml b/contrib/testnets/remote/ansible/roles/start/tasks/main.yml deleted file mode 100644 index 6bc611c9..00000000 --- a/contrib/testnets/remote/ansible/roles/start/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: start service - service: "name={{service}} state=started" - diff --git a/contrib/testnets/remote/ansible/roles/stop/tasks/main.yml b/contrib/testnets/remote/ansible/roles/stop/tasks/main.yml deleted file mode 100644 index 7db356f2..00000000 --- a/contrib/testnets/remote/ansible/roles/stop/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: stop service - service: "name={{service}} state=stopped" - diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/http_check.d/conf.yaml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/http_check.d/conf.yaml deleted file mode 100644 index 6932ed6f..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/http_check.d/conf.yaml +++ /dev/null @@ -1,13 +0,0 @@ -init_config: - -instances: - - name: gaiad - url: http://localhost:26657/status - timeout: 1 - content_match: '"latest_block_height": "0",' - reverse_content_match: true - - - name: gaiacli - url: http://localhost:1317/node_version - timeout: 1 - diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/network.d/conf.yaml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/network.d/conf.yaml deleted file mode 100644 index b174490f..00000000 --- 
a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/network.d/conf.yaml +++ /dev/null @@ -1,9 +0,0 @@ -init_config: - -instances: - - collect_connection_state: true - excluded_interfaces: - - lo - - lo0 - collect_rate_metrics: true - collect_count_metrics: true diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/process.d/conf.yaml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/process.d/conf.yaml deleted file mode 100644 index 465cadad..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/process.d/conf.yaml +++ /dev/null @@ -1,15 +0,0 @@ -init_config: - -instances: -- name: ssh - search_string: ['ssh', 'sshd'] - thresholds: - critical: [1, 5] -- name: gaiad - search_string: ['gaiad'] - thresholds: - critical: [1, 1] -- name: gaiacli - search_string: ['gaiacli'] - thresholds: - critical: [1, 1] diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/prometheus.d/conf.yaml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/prometheus.d/conf.yaml deleted file mode 100644 index 20c04cee..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/files/conf.d/prometheus.d/conf.yaml +++ /dev/null @@ -1,10 +0,0 @@ -init_config: - -instances: - - prometheus_url: http://127.0.0.1:26660 - metrics: - - go* - - mempool* - - p2p* - - process* - - promhttp* diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/handlers/main.yml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/handlers/main.yml deleted file mode 100644 index 90e05c17..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: restart datadog-agent - service: name=datadog-agent state=restarted - diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/tasks/main.yml b/contrib/testnets/remote/ansible/roles/update-datadog-agent/tasks/main.yml deleted file mode 100644 index c6174c6a..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: Set datadog.yaml config - template: src=datadog.yaml.j2 dest=/etc/datadog-agent/datadog.yaml - notify: restart datadog-agent - -- name: Set metrics config - copy: src=conf.d/ dest=/etc/datadog-agent/conf.d/ - notify: restart datadog-agent - diff --git a/contrib/testnets/remote/ansible/roles/update-datadog-agent/templates/datadog.yaml.j2 b/contrib/testnets/remote/ansible/roles/update-datadog-agent/templates/datadog.yaml.j2 deleted file mode 100644 index 3c2e031d..00000000 --- a/contrib/testnets/remote/ansible/roles/update-datadog-agent/templates/datadog.yaml.j2 +++ /dev/null @@ -1,561 +0,0 @@ - -# The host of the Datadog intake server to send Agent data to -dd_url: https://app.datadoghq.com - -# The Datadog api key to associate your Agent's data with your organization. -# Can be found here: -# https://app.datadoghq.com/account/settings -api_key: {{DD_API_KEY}} - -# If you need a proxy to connect to the Internet, provide it here (default: -# disabled). You can use the 'no_proxy' list to specify hosts that should -# bypass the proxy. These settings might impact your checks' requests, please -# refer to the specific check documentation for more details. Environment -# variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (comma-separated string) will -# override the values set here. See https://docs.datadoghq.com/agent/proxy/.
-# -# proxy: -# http: http(s)://user:password@proxy_for_http:port -# https: http(s)://user:password@proxy_for_https:port -# no_proxy: -# - host1 -# - host2 - -# Setting this option to "yes" will tell the agent to skip validation of SSL/TLS certificates. -# This may be necessary if the agent is running behind a proxy. See this page for details: -# https://github.com/DataDog/dd-agent/wiki/Proxy-Configuration#using-haproxy-as-a-proxy -# skip_ssl_validation: no - -# Setting this option to "yes" will force the agent to only use TLS 1.2 when -# pushing data to the url specified in "dd_url". -force_tls_12: yes - -# Force the hostname to whatever you want. (default: auto-detected) -hostname: {{inventory_hostname | replace ("_","-")}} - -# Make the agent use "hostname -f" on unix-based systems as a last resort -# way of determining the hostname instead of Golang "os.Hostname()" -# This will be enabled by default in version 6.4 -# More information at https://dtdg.co/flag-hostname-fqdn -# hostname_fqdn: false - -# Set the host's tags (optional) -tags: ['testnet:{{TESTNET_NAME}}','cluster:{{CLUSTER_NAME}}'] -# - mytag -# - env:prod -# - role:database - -# Histogram and Historate configuration -# -# Configure which aggregated value to compute. Possible values are: min, max, -# median, avg, sum and count. -# -# histogram_aggregates: ["max", "median", "avg", "count"] -# -# Configure which percentiles will be computed. Must be a list of float -# between 0 and 1. -# Warning: percentiles must be specified as yaml strings -# -# histogram_percentiles: ["0.95"] -# -# Copy histogram values to distributions for true global distributions (in beta) -# This will increase the number of custom metrics created -# histogram_copy_to_distribution: false -# -# A prefix to add to distribution metrics created when histogram_copy_to_distributions is true -# histogram_copy_to_distribution_prefix: "" - -# Forwarder timeout in seconds -# forwarder_timeout: 20 - -# The forwarder retries failed requests. Use this setting to change the -# maximum length of the forwarder's retry queue (each request in the queue -# takes no more than 2MB in memory) -# forwarder_retry_queue_max_size: 30 - -# The number of workers used by the forwarder. Please note each worker will -# open an outbound HTTP connection towards Datadog's metrics intake at every -# flush. -# forwarder_num_workers: 1 - -# Collect AWS EC2 custom tags as agent tags -collect_ec2_tags: true - -# The path containing check configuration files -# By default, uses the conf.d folder located in the agent configuration folder. -# confd_path: - -# Additional path where to search for Python checks -# By default, uses the checks.d folder located in the agent configuration folder. -# additional_checksd: - -# The port for the go_expvar server -# expvar_port: 5000 - -# The port on which the IPC api listens -# cmd_port: 5001 - -# The port for the browser GUI to be served -# Setting 'GUI_port: -1' turns off the GUI completely -# Default is '5002' on Windows and macOS ; turned off on Linux -# GUI_port: -1 - -# The Agent runs workers in parallel to execute checks. By default the number -# of workers is set to 1. If set to 0 the agent will automatically determine -# the best number of runners needed based on the number of checks running. This -# would optimize the check collection time but may produce CPU spikes. -# check_runners: 1 - -# Metadata collection should always be enabled, except if you are running several -# agents/dsd instances per host. 
In that case, only one agent should have it on. -# WARNING: disabling it on every agent will lead to display and billing issues -# enable_metadata_collection: true - -# Enable the gohai collection of systems data -# enable_gohai: true - -# IPC api server timeout in seconds -# server_timeout: 15 - -# Some environments may have the procfs file system mounted in a miscellaneous -# location. The procfs_path configuration parameter provides a mechanism to -# override the standard default location: '/proc' - this setting will trickle -# down to integrations and affect their behavior if they rely on the psutil -# python package. -# procfs_path: /proc - -# BETA: Encrypted Secrets (Linux only) -# -# This feature is in beta and its options or behaviour might break between -# minor or bugfix releases of the Agent. -# -# The agent can call an external command to fetch secrets. The command will be -# executed maximum once per instance containing an encrypted password. -# Secrets are cached by the agent, this will avoid executing again the -# secret_backend_command to fetch an already known secret (useful when combine -# with Autodiscovery). This feature is still in beta. -# -# For more information see: https://github.com/DataDog/datadog-agent/blob/master/docs/agent/secrets.md -# -# Path to the script to execute. The script must belong to the same user used -# to run the agent. Executable right must be given to the agent and no rights -# for 'group' or 'other'. -# secret_backend_command: /path/to/command -# -# A list of arguments to give to the command at each run (optional) -# secret_backend_arguments: -# - argument1 -# - argument2 -# -# The size in bytes of the buffer used to store the command answer (apply to -# both stdout and stderr) -# secret_backend_output_max_size: 1024 -# -# The timeout to execute the command in second -# secret_backend_timeout: 5 - - -# Metadata providers, add or remove from the list to enable or disable collection. -# Intervals are expressed in seconds. You can also set a provider's interval to 0 -# to disable it. -# metadata_providers: -# - name: k8s -# interval: 60 - -# DogStatsd -# -# If you don't want to enable the DogStatsd server, set this option to no -# use_dogstatsd: yes -# -# Make sure your client is sending to the same UDP port -# dogstatsd_port: 8125 -# -# The host to bind to receive external metrics (used only by the dogstatsd -# server for now). For dogstatsd this is ignored if -# 'dogstatsd_non_local_traffic' is set to true -# bind_host: localhost -# -# Dogstatsd can also listen for metrics on a Unix Socket (*nix only). -# Set to a valid filesystem path to enable. -# dogstatsd_socket: /var/run/dogstatsd/dsd.sock -# -# When using Unix Socket, dogstatsd can tag metrics with container metadata. -# If running dogstatsd in a container, host PID mode (e.g. with --pid=host) is required. -# dogstatsd_origin_detection: false -# -# The buffer size use to receive statsd packet, in bytes -# dogstatsd_buffer_size: 1024 -# -# Whether dogstatsd should listen to non local UDP traffic -# dogstatsd_non_local_traffic: no -# -# Publish dogstatsd's internal stats as Go expvars -# dogstatsd_stats_enable: no -# -# How many items in the dogstatsd's stats circular buffer -# dogstatsd_stats_buffer: 10 -# -# The port for the go_expvar server -# dogstatsd_stats_port: 5000 -# -# The number of bytes allocated to dogstatsd's socket receive buffer (POSIX -# system only). By default, this value is set by the system. 
If you need to -# increase the size of this buffer but keep the OS default value the same, you -# can set dogstatsd's receive buffer size here. The maximum accepted value -# might change depending on the OS. -# dogstatsd_so_rcvbuf: -# -# If you want to forward every packet received by the dogstatsd server -# to another statsd server, uncomment these lines. -# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets, -# as your other statsd server might not be able to handle them. -# statsd_forward_host: address_of_own_statsd_server -# statsd_forward_port: 8125 -# -# If you want all statsd metrics coming from this host to be namespaced -# you can configure the namespace below. Each metric received will be prefixed -# with the namespace before it's sent to Datadog. -# statsd_metric_namespace: - -# Logs agent -# -# Logs agent is disabled by default -#logs_enabled: true -# -# Enable logs collection for all containers, disabled by default -# logs_config: -# container_collect_all: false -# - -# JMX -# -# jmx_pipe_path: -# jmx_pipe_name: dd-auto_discovery -# -# If you only run Autodiscovery tests, jmxfetch might fail to pick up custom_jar_paths -# set in the check templates. If that is the case, you can force custom jars here. -# jmx_custom_jars: -# - /jmx-jars/jboss-cli-client.jar -# -# When running in a memory cgroup, openjdk 8u131 and higher can automatically adjust -# its heap memory usage in accordance to the cgroup/container's memory limit. -# Default is false: we'll set a Xmx of 200MB if none is configured. -# Note: older openjdk versions and other jvms might fail to start if this option is set -# -# jmx_use_cgroup_memory_limit: true -# - -# Autoconfig -# -# Directory containing configuration templates -# autoconf_template_dir: /datadog/check_configs -# -# The providers the Agent should call to collect checks configurations. -# Please note the File Configuration Provider is enabled by default and cannot -# be configured. -# config_providers: - -## The kubelet provider handles templates embedded in pod annotations, see -## https://docs.datadoghq.com/guides/autodiscovery/#template-source-kubernetes-pod-annotations -# - name: kubelet -# polling: true - -## The docker provider handles templates embedded in container labels, see -## https://docs.datadoghq.com/guides/autodiscovery/#template-source-docker-label-annotations -# - name: docker -# polling: true - -# - name: etcd -# polling: true -# template_dir: /datadog/check_configs -# template_url: http://127.0.0.1 -# username: -# password: - -# - name: consul -# polling: true -# template_dir: /datadog/check_configs -# template_url: http://127.0.0.1 -# ca_file: -# ca_path: -# cert_file: -# key_file: -# username: -# password: -# token: - -# - name: zookeeper -# polling: true -# template_dir: /datadog/check_configs -# template_url: 127.0.0.1 -# username: -# password: - -# Logging -# -# log_level: info -# log_file: /var/log/datadog/agent.log - -# Set to 'yes' to output logs in JSON format -# log_format_json: no - -# Set to 'no' to disable logging to stdout -# log_to_console: yes - -# Set to 'yes' to disable logging to the log file -# disable_file_logging: no - -# Set to 'yes' to enable logging to syslog.
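The DogStatsD settings above accept plain statsd packets on UDP port 8125 by default, and `statsd_metric_namespace` lets the agent prefix every metric it receives. A minimal sketch of a sender, assuming the default port and a locally running agent (a hypothetical helper, not from the deleted files):

```
# statsd_send.py (hypothetical sender; assumes the default dogstatsd_port)
import socket

def send_gauge(name, value, host="127.0.0.1", port=8125):
    """Emit one gauge in plain statsd wire format: "name:value|g".

    If statsd_metric_namespace were set in datadog.yaml, the agent itself
    would add the prefix; the sender does not need to know about it.
    """
    payload = "{}:{}|g".format(name, value).encode()
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.sendto(payload, (host, port))

if __name__ == "__main__":
    send_gauge("testnet.heartbeat", 1)
```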
-# -# log_to_syslog: no -# -# If 'syslog_uri' is left undefined/empty, a local domain socket connection will be attempted -# -# syslog_uri: -# -# Set to 'yes' to output in an RFC 5424-compliant format -# -# syslog_rfc: no -# -# If TLS enabled, you must specify a path to a PEM certificate here -# -# syslog_pem: /path/to/certificate.pem -# -# If TLS enabled, you must specify a path to a private key here -# -# syslog_key: /path/to/key.pem -# -# If TLS enabled, you may enforce TLS verification here (defaults to true) -# -# syslog_tls_verify: yes -# - -# Autodiscovery -# -# Change the root directory to look at to get cgroup statistics. Useful when running inside a -# container with host directories mounted on a different folder. -# Default if environment variable "DOCKER_DD_AGENT" is set to "yes" -# "/host/sys/fs/cgroup" and "/sys/fs/cgroup" if not. -# -# container_cgroup_root: /host/sys/fs/cgroup/ -# -# Change the root directory to look at to get proc statistics. Useful when running inside a -# container with host directories mounted on a different folder. -# Default if environment variable "DOCKER_DD_AGENT" is set to "yes" -# "/host/proc" and "/proc" if not. -# -# container_proc_root: /host/proc -# -# Choose "auto" if you want to let the agent find any relevant listener on your host -# At the moment, the only auto listener supported is docker -# If you have already set docker anywhere in the listeners, the auto listener is ignored -# listeners: -# - name: auto -# - name: docker -# -# Exclude containers from metrics and AD based on their name or image: -# An excluded container will not get any individual container metric reported for it. -# Please note that the `docker.containers.running`, `.stopped`, `.running.total` and -# `.stopped.total` metrics are not affected by these settings and always count all -# containers. This does not affect your per-container billing. -# -# How it works: include first. -# If a container matches an exclude rule, it won't be included unless it first matches an include rule. -# -# Rules are regexp. -# -# Examples: -# exclude all, except containers based on the 'ubuntu' image or the 'debian' image. -# ac_exclude: ["image:.*"] -# ac_include: ["image:ubuntu", "image:debian"] -# -# include all, except containers based on the 'ubuntu' image. -# ac_exclude: ["image:ubuntu"] -# ac_include: [] -# -# exclude all debian images except containers with a name starting with 'frontend'. -# ac_exclude: ["image:debian"] -# ac_include: ["name:frontend.*"] -# -# ac_exclude: [] -# ac_include: [] -# -# -# Exclude default pause containers from orchestrators. -# -# By default the agent will not monitor kubernetes/openshift pause -# container. They will still be counted in the container count (just like -# excluded containers) since ignoring them would give a wrong impression -# about the docker daemon load. -# -# exclude_pause_container: true - -# Exclude default containers from DockerCloud: -# The following configuration will instruct the agent to ignore the containers from Docker Cloud. -# You can remove the ones you want to collect. -# ac_exclude: ["image:dockercloud/network-daemon","image:dockercloud/cleanup","image:dockercloud/logrotate","image:dockercloud/events","image:dockercloud/ntpd"] -# ac_include: [] -# -# You can also use the regex to ignore them all: -# ac_exclude: ["image:dockercloud/*"] -# ac_include: [] -# -# The default timeout value when connecting to the docker daemon -# is 5 seconds. It can be configured with this option. 
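The container include/exclude rules above are plain regexes with include-first semantics: an exclude match only drops a container that matched no include rule, and anything matching neither list is kept. A toy re-implementation of that matching logic as described (not the agent's actual code):

```
# ad_filter.py (toy re-implementation of the include/exclude semantics above)
import re

def is_monitored(identifier, ac_include, ac_exclude):
    """Include rules win; anything matching neither list is monitored."""
    if any(re.search(p, identifier) for p in ac_include):
        return True
    if any(re.search(p, identifier) for p in ac_exclude):
        return False
    return True

if __name__ == "__main__":
    # "exclude all, except containers based on the ubuntu or debian image":
    ac_exclude, ac_include = ["image:.*"], ["image:ubuntu", "image:debian"]
    for ident in ("image:ubuntu", "image:centos", "name:frontend-1"):
        print(ident, "->", is_monitored(ident, ac_include, ac_exclude))
```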
-# docker_query_timeout: 5 -# - -# Docker tag extraction -# -# We can extract container label or environment variables -# as metric tags. If you prefix your tag name with +, it -# will only be added to high cardinality metrics (docker check) -# -# docker_labels_as_tags: -# label_name: tag_name -# high_cardinality_label_name: +tag_name -# docker_env_as_tags: -# ENVVAR_NAME: tag_name -# -# Example: -# docker_labels_as_tags: -# com.docker.compose.service: service_name -# com.docker.compose.project: +project_name -# - -# Kubernetes tag extraction -# -# We can extract pod labels and annotations as metric tags. If you prefix your -# tag name with +, it will only be added to high cardinality metrics -# -# kubernetes_pod_labels_as_tags: -# app: kube_app -# pod-template-hash: +kube_pod-template-hash -# -# kubernetes_pod_annotations_as_tags: -# app: kube_app -# pod-template-hash: +kube_pod-template-hash -# - -# ECS integration -# -# URL where the ECS agent can be found. Standard cases will be autodetected. -# ecs_agent_url: http://localhost:51678 -# - -# Kubernetes kubelet connectivity -# -# The kubelet host and port should be autodetected when running inside a pod. -# If you run into connectivity issues, you can set these options according to -# your cluster setup: -# kubernetes_kubelet_host: autodetected -# kubernetes_http_kubelet_port: 10255 -# kubernetes_https_kubelet_port: 10250 -# -# When using HTTPS, we verify the kubelet's certificate, you can tune this: -# kubelet_tls_verify: true -# kubelet_client_ca: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -# -# If authentication is needed, the agent will use the pod's serviceaccount's -# credentials. If you want to use a different account, or are running the agent -# on the host, you can set the credentials to use here: -# kubelet_auth_token_path: /path/to/file -# kubelet_client_crt: /path/to/key -# kubelet_client_key: /path/to/key -# - -# Kubernetes apiserver integration -# -# When running in a pod, the agent will automatically use the pod's serviceaccount -# to authenticate with the apiserver. If you wish to install the agent out of a pod -# or customise connection parameters, you can provide the path to a KubeConfig file -# see https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/ -# -# kubernetes_kubeconfig_path: /path/to/file -# -# In order to collect Kubernetes service names, the agent needs certain rights (see RBAC documentation in -# [docker readme](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#kubernetes)). -# You can disable this option or set how often (in seconds) the agent refreshes the internal mapping of services to -# ContainerIDs with the following options: -# kubernetes_collect_metadata_tags: true -# kubernetes_metadata_tag_update_freq: 60 -# kubernetes_apiserver_client_timeout: 10 -# kubernetes_apiserver_poll_freq: 30 -# -# To collect Kubernetes events, leader election must be enabled and collect_kubernetes_events set to true. -# Only the leader will collect events. More details about events [here](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#event-collection). -# collect_kubernetes_events: false -# -# -# Leader Election settings, more details about leader election [here](https://github.com/DataDog/datadog-agent/blob/master/Dockerfiles/agent/README.md#leader-election) -# To enable the leader election on this node, set the leader_election variable to true.
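The `+` prefix convention above marks a tag as high-cardinality only: such tags are attached to high-cardinality metrics and skipped everywhere else. A toy model of that label-to-tag mapping as described (not the agent's code):

```
# tag_extraction.py (toy model of the label-to-tag convention above)
def extract_tags(labels, mapping, high_cardinality):
    """Map labels to "tag:value" strings.

    A "+" prefix on the tag name means the tag is only attached to
    high-cardinality metrics, so it is skipped otherwise.
    """
    tags = []
    for label, tag in mapping.items():
        if label not in labels:
            continue
        if tag.startswith("+"):
            if not high_cardinality:
                continue
            tag = tag[1:]
        tags.append("{}:{}".format(tag, labels[label]))
    return tags

if __name__ == "__main__":
    labels = {"com.docker.compose.service": "gaiad",
              "com.docker.compose.project": "testnet"}
    mapping = {"com.docker.compose.service": "service_name",
               "com.docker.compose.project": "+project_name"}
    print(extract_tags(labels, mapping, high_cardinality=False))
    print(extract_tags(labels, mapping, high_cardinality=True))
```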
-# leader_election: false -# The leader election lease is an integer in seconds. -# leader_lease_duration: 60 -# -# Node labels that should be collected and their name in host tags. Off by default. -# Some of these labels are redundant with metadata collected by -# cloud provider crawlers (AWS, GCE, Azure) -# -# kubernetes_node_labels_as_tags: -# kubernetes.io/hostname: nodename -# beta.kubernetes.io/os: os - -# Process agent specific settings -# -process_config: -# A string indicating the enabled state of the Process Agent. -# If "false" (the default) it will only collect containers. -# If "true" it will collect containers and processes. -# If "disabled" it will be disabled altogether and won't start. - enabled: "true" -# The full path to the file where process-agent logs will be written. -# log_file: -# The interval, in seconds, at which we will run each check. If you want consistent -# behavior between real-time you may set the Container/ProcessRT intervals to 10. -# Defaults to 10s for normal checks and 2s for others. -# intervals: -# container: -# container_realtime: -# process: -# process_realtime: -# A list of regex patterns that will exclude a process if matched. -# blacklist_patterns: -# How many check results to buffer in memory when POST fails. The default is usually fine. -# queue_size: -# The maximum number of file descriptors to open when collecting net connections. -# Only change if you are running out of file descriptors from the Agent. -# max_proc_fds: -# The maximum number of processes or containers per message. -# Only change if the defaults are causing issues. -# max_per_message: -# Overrides the path to the Agent bin used for getting the hostname. The default is usually fine. -# dd_agent_bin: -# Overrides of the environment we pass to fetch the hostname. The default is usually fine. -# dd_agent_env: - -# Trace Agent Specific Settings -# -# apm_config: -# Whether or not the APM Agent should run -# enabled: true -# The environment tag that Traces should be tagged with -# Will inherit from "env" tag if none is applied here -# env: none -# The port that the Receiver should listen on -# receiver_port: 8126 -# Whether the Trace Agent should listen for non local traffic -# Only enable if Traces are being sent to this Agent from another host/container -# apm_non_local_traffic: false -# Extra global sample rate to apply on all the traces -# This sample rate is combined to the sample rate from the sampler logic, still promoting interesting traces -# From 1 (no extra rate) to 0 (don't sample at all) -# extra_sample_rate: 1.0 -# Maximum number of traces per second to sample. -# The limit is applied over an average over a few minutes ; much bigger spikes are possible. -# Set to 0 to disable the limit. 
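The trace-agent's `extra_sample_rate` above composes multiplicatively with the sampler's own keep probability, from 1 (no extra sampling) down to 0 (drop everything). A toy model of that behavior (illustrative only, not the trace-agent's implementation):

```
# sampling_model.py (toy model, not the trace-agent's implementation)
import random

def keep_trace(sampler_rate, extra_sample_rate=1.0):
    """Keep a trace with probability sampler_rate * extra_sample_rate."""
    return random.random() < sampler_rate * extra_sample_rate

if __name__ == "__main__":
    random.seed(0)
    kept = sum(keep_trace(0.5, extra_sample_rate=0.2) for _ in range(10000))
    print("kept {}/10000 traces (expected ~1000)".format(kept))
```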
-# max_traces_per_second: 10 -# A blacklist of regular expressions can be provided to disable certain traces based on their resource name -# all entries must be surrounded by double quotes and separated by commas -# Example: ["(GET|POST) /healthcheck", "GET /V1"] -# ignore_resources: [] diff --git a/contrib/testnets/remote/ansible/roles/upgrade-gaiad/handlers/main.yml b/contrib/testnets/remote/ansible/roles/upgrade-gaiad/handlers/main.yml deleted file mode 100644 index 8a63ccbf..00000000 --- a/contrib/testnets/remote/ansible/roles/upgrade-gaiad/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: restart gaiad - service: name=gaiad state=restarted - diff --git a/contrib/testnets/remote/ansible/roles/upgrade-gaiad/tasks/main.yml b/contrib/testnets/remote/ansible/roles/upgrade-gaiad/tasks/main.yml deleted file mode 100644 index b52de9eb..00000000 --- a/contrib/testnets/remote/ansible/roles/upgrade-gaiad/tasks/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- - -- name: Copy binary - copy: - src: "{{BINARY}}" - dest: /usr/bin/gaiad - mode: 0755 - notify: restart gaiad - -- name: Copy new genesis.json file, if available - when: "GENESISFILE is defined and GENESISFILE != ''" - copy: - src: "{{GENESISFILE}}" - dest: /home/gaiad/.gaia/config/genesis.json - notify: restart gaiad - -- name: Download genesis.json URL, if available - when: "GENESISURL is defined and GENESISURL != ''" - get_url: - url: "{{GENESISURL}}" - dest: /home/gaiad/.gaia/config/genesis.json - force: yes - notify: restart gaiad - -- name: Reset network - when: UNSAFE_RESET_ALL | default(false) | bool - command: "sudo -u gaiad gaiad unsafe-reset-all" - notify: restart gaiad - diff --git a/contrib/testnets/remote/ansible/set-debug.yml b/contrib/testnets/remote/ansible/set-debug.yml deleted file mode 100644 index 76ee1b35..00000000 --- a/contrib/testnets/remote/ansible/set-debug.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - set-debug - diff --git a/contrib/testnets/remote/ansible/setup-fullnodes.yml b/contrib/testnets/remote/ansible/setup-fullnodes.yml deleted file mode 100644 index da1810d1..00000000 --- a/contrib/testnets/remote/ansible/setup-fullnodes.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -#GENESISFILE required -#CONFIGFILE required -#BINARY required - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - increase-openfiles - - setup-fullnodes - diff --git a/contrib/testnets/remote/ansible/setup-journald.yml b/contrib/testnets/remote/ansible/setup-journald.yml deleted file mode 100644 index 369c483f..00000000 --- a/contrib/testnets/remote/ansible/setup-journald.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -#DD_API_KEY - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - setup-journald - diff --git a/contrib/testnets/remote/ansible/setup-validators.yml b/contrib/testnets/remote/ansible/setup-validators.yml deleted file mode 100644 index 0e6f2959..00000000 --- a/contrib/testnets/remote/ansible/setup-validators.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - increase-openfiles - - setup-validators - diff --git a/contrib/testnets/remote/ansible/start.yml b/contrib/testnets/remote/ansible/start.yml deleted file mode 100644 index bc29679e..00000000 --- a/contrib/testnets/remote/ansible/start.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - vars: - - service: gaiad - roles: - - start - diff --git 
a/contrib/testnets/remote/ansible/status.yml b/contrib/testnets/remote/ansible/status.yml deleted file mode 100644 index ebd7f72e..00000000 --- a/contrib/testnets/remote/ansible/status.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- hosts: all - connection: local - any_errors_fatal: true - gather_facts: no - - tasks: - - name: Gather status - uri: - body_format: json - url: "http://{{ansible_host}}:26657/status" - register: status - - - name: Print status - debug: var=status.json.result - diff --git a/contrib/testnets/remote/ansible/stop.yml b/contrib/testnets/remote/ansible/stop.yml deleted file mode 100644 index 312cb9cf..00000000 --- a/contrib/testnets/remote/ansible/stop.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - vars: - - service: gaiad - roles: - - stop - diff --git a/contrib/testnets/remote/ansible/update-datadog-agent.yml b/contrib/testnets/remote/ansible/update-datadog-agent.yml deleted file mode 100644 index 3fe1e000..00000000 --- a/contrib/testnets/remote/ansible/update-datadog-agent.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -#DD_API_KEY,TESTNET_NAME,CLUSTER_NAME required - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - update-datadog-agent - diff --git a/contrib/testnets/remote/ansible/upgrade-gaia.yml b/contrib/testnets/remote/ansible/upgrade-gaia.yml deleted file mode 100644 index cde56034..00000000 --- a/contrib/testnets/remote/ansible/upgrade-gaia.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - upgrade-gaiad - - add-lcd - diff --git a/contrib/testnets/remote/ansible/upgrade-gaiad.yml b/contrib/testnets/remote/ansible/upgrade-gaiad.yml deleted file mode 100644 index 4e81c743..00000000 --- a/contrib/testnets/remote/ansible/upgrade-gaiad.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -# Required: BINARY -# Optional: GENESISFILE, UNSAFE_RESET_ALL - -- hosts: all - any_errors_fatal: true - gather_facts: no - roles: - - upgrade-gaiad - diff --git a/contrib/testnets/remote/terraform-app/.gitignore b/contrib/testnets/remote/terraform-app/.gitignore deleted file mode 100644 index d882c944..00000000 --- a/contrib/testnets/remote/terraform-app/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.terraform -terraform.tfstate -terraform.tfstate.backup -terraform.tfstate.d -.terraform.tfstate.lock.info diff --git a/contrib/testnets/remote/terraform-app/files/terraform.sh b/contrib/testnets/remote/terraform-app/files/terraform.sh deleted file mode 100644 index 60b4dd8e..00000000 --- a/contrib/testnets/remote/terraform-app/files/terraform.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Script to initialize a testnet settings on a server - -#Usage: terraform.sh - -#Add gaiad node number for remote identification -echo "$2" > /etc/nodeid - diff --git a/contrib/testnets/remote/terraform-app/infra/attachment.tf b/contrib/testnets/remote/terraform-app/infra/attachment.tf deleted file mode 100644 index 1ba5f4fe..00000000 --- a/contrib/testnets/remote/terraform-app/infra/attachment.tf +++ /dev/null @@ -1,21 +0,0 @@ -# This is the reason why we can't separate nodes and load balancer creation into different modules. -# https://github.com/hashicorp/terraform/issues/10857 -# In short: the list of instances coming from the nodes module is a generated variable -# and it should be the input for the load-balancer generation. However when attaching the instances -# to the load-balancer, aws_lb_target_group_attachment.count cannot be a generated value. 
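Because of that Terraform limitation, the attachment resources that follow size themselves with an expression over plain input variables, SERVERS * min(number of availability zones, max_zones), rather than over the generated instance list. A short sketch of that arithmetic (illustrative only; the zone names are made up):

```
# attach_count.py (illustrative arithmetic only)
def attachment_count(servers, zone_names, max_zones):
    # Mirrors "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}"
    return servers * min(len(zone_names), max_zones)

if __name__ == "__main__":
    zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
    print(attachment_count(2, zones, max_zones=1))  # 2 attachments
    print(attachment_count(2, zones, max_zones=4))  # 6 attachments
```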
- -#Instance Attachment (autoscaling is the future) -resource "aws_lb_target_group_attachment" "lb_attach" { - count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}" - target_group_arn = "${aws_lb_target_group.lb_target_group.arn}" - target_id = "${element(aws_instance.node.*.id,count.index)}" - port = 26657 -} - -resource "aws_lb_target_group_attachment" "lb_attach_lcd" { - count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}" - target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.arn}" - target_id = "${element(aws_instance.node.*.id,count.index)}" - port = 1317 -} - diff --git a/contrib/testnets/remote/terraform-app/infra/instance.tf b/contrib/testnets/remote/terraform-app/infra/instance.tf deleted file mode 100644 index 53b21e62..00000000 --- a/contrib/testnets/remote/terraform-app/infra/instance.tf +++ /dev/null @@ -1,58 +0,0 @@ -resource "aws_key_pair" "key" { - key_name = "${var.name}" - public_key = "${file(var.ssh_public_file)}" -} - -data "aws_ami" "linux" { - most_recent = true - filter { - name = "name" - values = ["${var.image_name}"] - } -} - -resource "aws_instance" "node" { -# depends_on = ["${element(aws_route_table_association.route_table_association.*,count.index)}"] - count = "${var.SERVERS*min(length(data.aws_availability_zones.zones.names),var.max_zones)}" - ami = "${data.aws_ami.linux.image_id}" - instance_type = "${var.instance_type}" - key_name = "${aws_key_pair.key.key_name}" - associate_public_ip_address = true - vpc_security_group_ids = [ "${aws_security_group.secgroup.id}" ] - subnet_id = "${element(aws_subnet.subnet.*.id,count.index)}" - availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}" - - tags { - Environment = "${var.name}" - Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}" - } - - volume_tags { - Environment = "${var.name}" - Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}-VOLUME" - } - - root_block_device { - volume_size = 40 - } - - connection { - user = "centos" - private_key = "${file(var.ssh_private_file)}" - timeout = "600s" - } - - provisioner "file" { - source = "files/terraform.sh" - destination = "/tmp/terraform.sh" - } - - provisioner "remote-exec" { - inline = [ - "chmod +x /tmp/terraform.sh", - "sudo /tmp/terraform.sh ${var.name} ${count.index}", - ] - } - -} - diff --git a/contrib/testnets/remote/terraform-app/infra/lb.tf b/contrib/testnets/remote/terraform-app/infra/lb.tf deleted file mode 100644 index 201a53ff..00000000 --- a/contrib/testnets/remote/terraform-app/infra/lb.tf +++ /dev/null @@ -1,52 +0,0 @@ -resource "aws_lb" "lb" { - name = "${var.name}" - subnets = ["${aws_subnet.subnet.*.id}"] - security_groups = ["${aws_security_group.secgroup.id}"] - tags { - Name = "${var.name}" - } -# access_logs { -# bucket = "${var.s3_bucket}" -# prefix = "lblogs" -# } -} - -resource "aws_lb_listener" "lb_listener" { - load_balancer_arn = "${aws_lb.lb.arn}" - port = "443" - protocol = "HTTPS" - ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" - certificate_arn = "${var.certificate_arn}" - - default_action { - target_group_arn = "${aws_lb_target_group.lb_target_group.arn}" - type = "forward" - } -} - -resource "aws_lb_listener_rule" "listener_rule" { - listener_arn = "${aws_lb_listener.lb_listener.arn}" - priority = "100" - action { - type = "forward" - target_group_arn = "${aws_lb_target_group.lb_target_group.id}" - } - condition { - field = "path-pattern" - 
values = ["/"] - } -} - -resource "aws_lb_target_group" "lb_target_group" { - name = "${var.name}" - port = "26657" - protocol = "HTTP" - vpc_id = "${aws_vpc.vpc.id}" - tags { - name = "${var.name}" - } - health_check { - path = "/health" - } -} - diff --git a/contrib/testnets/remote/terraform-app/infra/lcd.tf b/contrib/testnets/remote/terraform-app/infra/lcd.tf deleted file mode 100644 index 5d09903d..00000000 --- a/contrib/testnets/remote/terraform-app/infra/lcd.tf +++ /dev/null @@ -1,39 +0,0 @@ -resource "aws_lb_listener" "lb_listener_lcd" { - load_balancer_arn = "${aws_lb.lb.arn}" - port = "1317" - protocol = "HTTPS" - ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" - certificate_arn = "${var.certificate_arn}" - - default_action { - target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.arn}" - type = "forward" - } -} - -resource "aws_lb_listener_rule" "listener_rule_lcd" { - listener_arn = "${aws_lb_listener.lb_listener_lcd.arn}" - priority = "100" - action { - type = "forward" - target_group_arn = "${aws_lb_target_group.lb_target_group_lcd.id}" - } - condition { - field = "path-pattern" - values = ["/"] - } -} - -resource "aws_lb_target_group" "lb_target_group_lcd" { - name = "${var.name}lcd" - port = "1317" - protocol = "HTTP" - vpc_id = "${aws_vpc.vpc.id}" - tags { - name = "${var.name}" - } - health_check { - path = "/node_version" - } -} - diff --git a/contrib/testnets/remote/terraform-app/infra/outputs.tf b/contrib/testnets/remote/terraform-app/infra/outputs.tf deleted file mode 100644 index fdb32611..00000000 --- a/contrib/testnets/remote/terraform-app/infra/outputs.tf +++ /dev/null @@ -1,24 +0,0 @@ -// The cluster name -output "name" { - value = "${var.name}" -} - -// The list of cluster instance IDs -output "instances" { - value = ["${aws_instance.node.*.id}"] -} - -#output "instances_count" { -# value = "${length(aws_instance.node.*)}" -#} - -// The list of cluster instance public IPs -output "public_ips" { - value = ["${aws_instance.node.*.public_ip}"] -} - -// Name of the ALB -output "lb_name" { - value = "${aws_lb.lb.dns_name}" -} - diff --git a/contrib/testnets/remote/terraform-app/infra/variables.tf b/contrib/testnets/remote/terraform-app/infra/variables.tf deleted file mode 100644 index 0a96f144..00000000 --- a/contrib/testnets/remote/terraform-app/infra/variables.tf +++ /dev/null @@ -1,39 +0,0 @@ -variable "name" { - description = "The testnet name, e.g cdn" -} - -variable "image_name" { - description = "Image name" - default = "CentOS Linux 7 x86_64 HVM EBS 1704_01" -} - -variable "instance_type" { - description = "The instance size to use" - default = "t2.small" -} - -variable "SERVERS" { - description = "Number of servers in an availability zone" - default = "1" -} - -variable "max_zones" { - description = "Maximum number of availability zones to use" - default = "1" -} - -variable "ssh_private_file" { - description = "SSH private key file to be used to connect to the nodes" - type = "string" -} - -variable "ssh_public_file" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - -variable "certificate_arn" { - description = "Load-balancer SSL certificate AWS ARN" - type = "string" -} - diff --git a/contrib/testnets/remote/terraform-app/infra/vpc.tf b/contrib/testnets/remote/terraform-app/infra/vpc.tf deleted file mode 100644 index 638ccfe0..00000000 --- a/contrib/testnets/remote/terraform-app/infra/vpc.tf +++ /dev/null @@ -1,104 +0,0 @@ -resource "aws_vpc" "vpc" { - cidr_block = "10.0.0.0/16" - - tags { - Name = 
"${var.name}" - } - -} - -resource "aws_internet_gateway" "internet_gateway" { - vpc_id = "${aws_vpc.vpc.id}" - - tags { - Name = "${var.name}" - } -} - -resource "aws_route_table" "route_table" { - vpc_id = "${aws_vpc.vpc.id}" - - route { - cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.internet_gateway.id}" - } - - tags { - Name = "${var.name}" - } -} - -data "aws_availability_zones" "zones" { - state = "available" -} - -resource "aws_subnet" "subnet" { - count = "${min(length(data.aws_availability_zones.zones.names),var.max_zones)}" - vpc_id = "${aws_vpc.vpc.id}" - availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}" - cidr_block = "${cidrsubnet(aws_vpc.vpc.cidr_block, 8, count.index)}" - map_public_ip_on_launch = "true" - - tags { - Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}" - } -} - -resource "aws_route_table_association" "route_table_association" { - count = "${min(length(data.aws_availability_zones.zones.names),var.max_zones)}" - subnet_id = "${element(aws_subnet.subnet.*.id,count.index)}" - route_table_id = "${aws_route_table.route_table.id}" -} - -resource "aws_security_group" "secgroup" { - name = "${var.name}" - vpc_id = "${aws_vpc.vpc.id}" - description = "Automated security group for application instances" - tags { - Name = "${var.name}" - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 1317 - to_port = 1317 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 26656 - to_port = 26657 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 26660 - to_port = 26660 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - - } -} - diff --git a/contrib/testnets/remote/terraform-app/main.tf b/contrib/testnets/remote/terraform-app/main.tf deleted file mode 100644 index 687e3b5b..00000000 --- a/contrib/testnets/remote/terraform-app/main.tf +++ /dev/null @@ -1,73 +0,0 @@ -#Terraform Configuration - -variable "APP_NAME" { - description = "Name of the application" -} - -variable "SERVERS" { - description = "Number of servers in an availability zone" - default = "1" -} - -variable "MAX_ZONES" { - description = "Maximum number of availability zones to use" - default = "4" -} - -#See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region -#eu-west-3 does not contain CentOS images -variable "REGION" { - description = "AWS Regions" - default = "us-east-1" -} - -variable "SSH_PRIVATE_FILE" { - description = "SSH private key file to be used to connect to the nodes" - type = "string" -} - -variable "SSH_PUBLIC_FILE" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - -variable "CERTIFICATE_ARN" { - description = "Load-balancer certificate AWS ARN" - type = "string" -} - -# ap-southeast-1 and ap-southeast-2 does not contain the newer CentOS 1704 image -variable "image" { - description = "AWS image name" - default = "CentOS Linux 7 x86_64 HVM EBS 1703_01" -} - -variable "instance_type" { - description = "AWS instance type" - default = "t2.large" -} - -provider "aws" { - region = "${var.REGION}" -} - -module "nodes" { - source = "infra" - name = "${var.APP_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - 
ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - certificate_arn = "${var.CERTIFICATE_ARN}" - SERVERS = "${var.SERVERS}" - max_zones = "${var.MAX_ZONES}" -} - -output "public_ips" { - value = "${module.nodes.public_ips}", -} - -output "lb_name" { - value = "${module.nodes.lb_name}" -} - diff --git a/contrib/testnets/remote/terraform-aws/.gitignore b/contrib/testnets/remote/terraform-aws/.gitignore deleted file mode 100644 index d882c944..00000000 --- a/contrib/testnets/remote/terraform-aws/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.terraform -terraform.tfstate -terraform.tfstate.backup -terraform.tfstate.d -.terraform.tfstate.lock.info diff --git a/contrib/testnets/remote/terraform-aws/files/terraform.sh b/contrib/testnets/remote/terraform-aws/files/terraform.sh deleted file mode 100644 index 47363b37..00000000 --- a/contrib/testnets/remote/terraform-aws/files/terraform.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# Script to initialize a testnet settings on a server - -#Usage: terraform.sh - -#Add gaiad node number for remote identification -REGION="$(($2 + 1))" -RNODE="$(($3 + 1))" -ID="$((${REGION} * 100 + ${RNODE}))" -echo "$ID" > /etc/nodeid - diff --git a/contrib/testnets/remote/terraform-aws/main.tf b/contrib/testnets/remote/terraform-aws/main.tf deleted file mode 100644 index 41e05995..00000000 --- a/contrib/testnets/remote/terraform-aws/main.tf +++ /dev/null @@ -1,249 +0,0 @@ -#Terraform Configuration - -#See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region -#eu-west-3 does not contain CentOS images -#us-east-1 usually contains other infrastructure and creating keys and security groups might conflict with that -variable "REGIONS" { - description = "AWS Regions" - type = "list" - default = ["us-east-2", "us-west-1", "us-west-2", "ap-south-1", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ca-central-1", "eu-central-1", "eu-west-1", "eu-west-2", "sa-east-1"] -} - -variable "TESTNET_NAME" { - description = "Name of the testnet" - default = "remotenet" -} - -variable "REGION_LIMIT" { - description = "Number of regions to populate" - default = "1" -} - -variable "SERVERS" { - description = "Number of servers in an availability zone" - default = "1" -} - -variable "SSH_PRIVATE_FILE" { - description = "SSH private key file to be used to connect to the nodes" - type = "string" -} - -variable "SSH_PUBLIC_FILE" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - - -# ap-southeast-1 and ap-southeast-2 does not contain the newer CentOS 1704 image -variable "image" { - description = "AWS image name" - default = "CentOS Linux 7 x86_64 HVM EBS 1703_01" -} - -variable "instance_type" { - description = "AWS instance type" - default = "t2.large" -} - -module "nodes-0" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,0)}" - multiplier = "0" - execute = "${var.REGION_LIMIT > 0}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-1" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,1)}" - multiplier = "1" - execute = "${var.REGION_LIMIT > 1}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module 
"nodes-2" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,2)}" - multiplier = "2" - execute = "${var.REGION_LIMIT > 2}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-3" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,3)}" - multiplier = "3" - execute = "${var.REGION_LIMIT > 3}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-4" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,4)}" - multiplier = "4" - execute = "${var.REGION_LIMIT > 4}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-5" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,5)}" - multiplier = "5" - execute = "${var.REGION_LIMIT > 5}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-6" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,6)}" - multiplier = "6" - execute = "${var.REGION_LIMIT > 6}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-7" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,7)}" - multiplier = "7" - execute = "${var.REGION_LIMIT > 7}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-8" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,8)}" - multiplier = "8" - execute = "${var.REGION_LIMIT > 8}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-9" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,9)}" - multiplier = "9" - execute = "${var.REGION_LIMIT > 9}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-10" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,10)}" - multiplier = "10" - execute = "${var.REGION_LIMIT > 10}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-11" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,11)}" - multiplier = "11" - execute = "${var.REGION_LIMIT > 11}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - 
ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-12" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,12)}" - multiplier = "12" - execute = "${var.REGION_LIMIT > 12}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -module "nodes-13" { - source = "nodes" - name = "${var.TESTNET_NAME}" - image_name = "${var.image}" - instance_type = "${var.instance_type}" - region = "${element(var.REGIONS,13)}" - multiplier = "13" - execute = "${var.REGION_LIMIT > 13}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - SERVERS = "${var.SERVERS}" -} - -output "public_ips" { - value = "${concat( - module.nodes-0.public_ips, - module.nodes-1.public_ips, - module.nodes-2.public_ips, - module.nodes-3.public_ips, - module.nodes-4.public_ips, - module.nodes-5.public_ips, - module.nodes-6.public_ips, - module.nodes-7.public_ips, - module.nodes-8.public_ips, - module.nodes-9.public_ips, - module.nodes-10.public_ips, - module.nodes-11.public_ips, - module.nodes-12.public_ips, - module.nodes-13.public_ips - )}", -} - diff --git a/contrib/testnets/remote/terraform-aws/nodes/main.tf b/contrib/testnets/remote/terraform-aws/nodes/main.tf deleted file mode 100644 index 825be4af..00000000 --- a/contrib/testnets/remote/terraform-aws/nodes/main.tf +++ /dev/null @@ -1,104 +0,0 @@ - -provider "aws" { - region = "${var.region}" -} - -resource "aws_key_pair" "testnets" { - count = "${var.execute?1:0}" - key_name = "testnets-${var.name}" - public_key = "${file(var.ssh_public_file)}" -} - -data "aws_ami" "linux" { - most_recent = true - filter { - name = "name" - values = ["${var.image_name}"] - } -} - -data "aws_availability_zones" "zones" { - state = "available" -} - -resource "aws_security_group" "secgroup" { - count = "${var.execute?1:0}" - name = "${var.name}" - description = "Automated security group for performance testing testnets" - tags { - Name = "testnets-${var.name}" - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 26656 - to_port = 26657 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 26660 - to_port = 26660 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - - } -} - -resource "aws_instance" "node" { - count = "${var.execute?var.SERVERS*length(data.aws_availability_zones.zones.names):0}" - ami = "${data.aws_ami.linux.image_id}" - instance_type = "${var.instance_type}" - key_name = "${aws_key_pair.testnets.key_name}" - associate_public_ip_address = true - security_groups = [ "${aws_security_group.secgroup.name}" ] - availability_zone = "${element(data.aws_availability_zones.zones.names,count.index)}" - - tags { - Environment = "${var.name}" - Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}" - } - - volume_tags { - Environment = "${var.name}" - Name = "${var.name}-${element(data.aws_availability_zones.zones.names,count.index)}-VOLUME" - } - - root_block_device { - volume_size = 40 - } - - connection { - user = "centos" - private_key = "${file(var.ssh_private_file)}" - timeout = "600s" - } - - provisioner "file" { - source = "files/terraform.sh" - destination = "/tmp/terraform.sh" - } - - provisioner 
"remote-exec" { - inline = [ - "chmod +x /tmp/terraform.sh", - "sudo /tmp/terraform.sh ${var.name} ${var.multiplier} ${count.index}", - ] - } - -} - diff --git a/contrib/testnets/remote/terraform-aws/nodes/outputs.tf b/contrib/testnets/remote/terraform-aws/nodes/outputs.tf deleted file mode 100644 index 2a4451d6..00000000 --- a/contrib/testnets/remote/terraform-aws/nodes/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -// The cluster name -output "name" { - value = "${var.name}" -} - -// The list of cluster instance IDs -output "instances" { - value = ["${aws_instance.node.*.id}"] -} - -// The list of cluster instance public IPs -output "public_ips" { - value = ["${aws_instance.node.*.public_ip}"] -} - diff --git a/contrib/testnets/remote/terraform-aws/nodes/variables.tf b/contrib/testnets/remote/terraform-aws/nodes/variables.tf deleted file mode 100644 index ef540e69..00000000 --- a/contrib/testnets/remote/terraform-aws/nodes/variables.tf +++ /dev/null @@ -1,42 +0,0 @@ -variable "name" { - description = "The testnet name, e.g cdn" -} - -variable "image_name" { - description = "Image name" - default = "CentOS Linux 7 x86_64 HVM EBS 1704_01" -} - -variable "instance_type" { - description = "The instance size to use" - default = "t2.small" -} - -variable "region" { - description = "AWS region to use" -} - -variable "multiplier" { - description = "Multiplier for node identification" -} - -variable "execute" { - description = "Set to false to disable the module" - default = true -} - -variable "SERVERS" { - description = "Number of servers in an availability zone" - default = "1" -} - -variable "ssh_private_file" { - description = "SSH private key file to be used to connect to the nodes" - type = "string" -} - -variable "ssh_public_file" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - diff --git a/contrib/testnets/remote/terraform-do/.gitignore b/contrib/testnets/remote/terraform-do/.gitignore deleted file mode 100644 index 79805236..00000000 --- a/contrib/testnets/remote/terraform-do/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.terraform -terraform.tfstate -terraform.tfstate.backup -terraform.tfstate.d -.terraform.tfstate.lock.info - diff --git a/contrib/testnets/remote/terraform-do/Makefile b/contrib/testnets/remote/terraform-do/Makefile deleted file mode 100644 index 76040e20..00000000 --- a/contrib/testnets/remote/terraform-do/Makefile +++ /dev/null @@ -1,100 +0,0 @@ -######################################## -### WARNING: The DigitalOcean scripts are deprecated. They are still here because -### they might be useful for developers. - -# Name of the testnet. Used in chain-id. -TESTNET_NAME?=remotenet - -# Name of the servers grouped together for management purposes. Used in tagging the servers in the cloud. -CLUSTER_NAME?=$(TESTNET_NAME) - -# Number of servers deployed in Digital Ocean. -# Number of servers to put in one availability zone in AWS. -SERVERS?=1 - -# Path to gaiad for deployment. Must be a Linux binary. -BINARY?=$(CURDIR)/../build/gaiad - -# Path to the genesis.json and config.toml files to deploy on full nodes. -GENESISFILE?=$(CURDIR)/../build/genesis.json -CONFIGFILE?=$(CURDIR)/../build/config.toml - -all: - @echo "There is no all. Only sum of the ones." - - -######################################## -### Extract genesis.json and config.toml from a node in a cluster - -extract-config: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! 
[ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" extract-config.yml - - -######################################## -### Remote validator nodes using terraform and ansible in Digital Ocean - -validators-start: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/terraform-do && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -auto-approve -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" setup-validators.yml - cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root start.yml - -validators-stop: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - cd remote/terraform-do && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)" - rm -rf remote/ansible/keys/ - -validators-status: - cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" status.yml - - -######################################## -### Remote full nodes using terraform and ansible in Digital Ocean - -fullnodes-start: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/terraform-do && terraform init && (terraform workspace new "$(CLUSTER_NAME)" || terraform workspace select "$(CLUSTER_NAME)") && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" -var TESTNET_NAME="$(CLUSTER_NAME)" -var SERVERS="$(SERVERS)" - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e BINARY=$(BINARY) -e TESTNET_NAME="$(TESTNET_NAME)" -e GENESISFILE="$(GENESISFILE)" -e CONFIGFILE="$(CONFIGFILE)" setup-fullnodes.yml - cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root start.yml - -fullnodes-stop: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." 
; false ; fi - cd remote/terraform-do && terraform workspace select "$(CLUSTER_NAME)" && terraform destroy -force -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_PUBLIC_FILE="$(HOME)/.ssh/id_rsa.pub" -var SSH_PRIVATE_FILE="$(HOME)/.ssh/id_rsa" && terraform workspace select default && terraform workspace delete "$(CLUSTER_NAME)" - -fullnodes-status: - cd remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" status.yml - - -######################################## -### Other calls - -upgrade-gaiad: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - @if [ -z "`file $(BINARY) | grep 'ELF 64-bit'`" ]; then echo "Please build a linux binary using 'make build-linux'." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -e BINARY=$(BINARY) upgrade-gaiad.yml - -list: - remote/ansible/inventory/digital_ocean.py | python -c 'import json,sys ; print("\n".join(json.loads("".join(sys.stdin.readlines()))["$(CLUSTER_NAME)"]["hosts"]))' - -install-datadog: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if [ -z "$(DD_API_KEY)" ]; then echo "DD_API_KEY environment variable not set." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root -e DD_API_KEY="$(DD_API_KEY)" -e TESTNET_NAME=$(TESTNET_NAME) -e CLUSTER_NAME=$(CLUSTER_NAME) install-datadog-agent.yml - -remove-datadog: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - cd remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l "$(CLUSTER_NAME)" -u root remove-datadog-agent.yml - - -# To avoid unintended conflicts with file names, always add to .PHONY -# unless there is a reason not to. -# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: all extract-config validators-start validators-stop validators-status fullnodes-start fullnodes-stop fullnodes-status upgrade-gaiad list install-datadog remove-datadog - diff --git a/contrib/testnets/remote/terraform-do/README.md b/contrib/testnets/remote/terraform-do/README.md deleted file mode 100644 index a2bf4b0d..00000000 --- a/contrib/testnets/remote/terraform-do/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Terraform & Ansible - -WARNING: The Digital Ocean scripts are obsolete. They are here because they might still be useful for developers. - -Automated deployments are done using [Terraform](https://www.terraform.io/) to create servers on Digital Ocean, then -[Ansible](http://www.ansible.com/) to create and manage testnets on those servers. - -## Prerequisites - -- Install [Terraform](https://www.terraform.io/downloads.html) and [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on a Linux machine. -- Create a [DigitalOcean API token](https://cloud.digitalocean.com/settings/api/tokens) with read and write capability. -- Install the Python dopy package (`pip install dopy`); it is required by Ansible's `digital_ocean.py` inventory script.
-- Create SSH keys - -``` -export DO_API_TOKEN="abcdef01234567890abcdef01234567890" -export TESTNET_NAME="remotenet" -export SSH_PRIVATE_FILE="$HOME/.ssh/id_rsa" -export SSH_PUBLIC_FILE="$HOME/.ssh/id_rsa.pub" -``` - -These will be used by both `terraform` and `ansible`. - -## Create a remote network - -``` -make remotenet-start -``` - -Optionally, you can set the number of servers you want to launch and the name of the testnet (which defaults to remotenet): - -``` -TESTNET_NAME="mytestnet" SERVERS=7 make remotenet-start -``` - -## Quickly see the /status endpoint - -``` -make remotenet-status -``` - -## Delete servers - -``` -make remotenet-stop -``` - -## Logging - -You can ship logs to Logz.io, an Elastic Stack (Elasticsearch, Logstash, and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on [this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then: - -``` -yum install systemd-devel || echo "This will only work on RHEL-based systems." -apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." - -go install github.com/mheese/journalbeat -ansible-playbook -i inventory/digital_ocean.py -l remotenet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 -``` diff --git a/contrib/testnets/remote/terraform-do/cluster/main.tf b/contrib/testnets/remote/terraform-do/cluster/main.tf deleted file mode 100644 index 07331ff3..00000000 --- a/contrib/testnets/remote/terraform-do/cluster/main.tf +++ /dev/null @@ -1,40 +0,0 @@ -resource "digitalocean_tag" "cluster" { - name = "${var.name}" -} - -resource "digitalocean_ssh_key" "cluster" { - name = "${var.name}" - public_key = "${file(var.ssh_public_file)}" -} - -resource "digitalocean_droplet" "cluster" { - name = "${var.name}-node${count.index}" - image = "centos-7-x64" - size = "${var.instance_size}" - region = "${element(var.regions, count.index)}" - ssh_keys = ["${digitalocean_ssh_key.cluster.id}"] - count = "${var.servers}" - tags = ["${digitalocean_tag.cluster.id}"] - - lifecycle = { - prevent_destroy = false - } - - connection { - private_key = "${file(var.ssh_private_file)}" - } - - provisioner "file" { - source = "files/terraform.sh" - destination = "/tmp/terraform.sh" - } - - provisioner "remote-exec" { - inline = [ - "chmod +x /tmp/terraform.sh", - "/tmp/terraform.sh ${var.name} ${count.index}", - ] - } - -} - diff --git a/contrib/testnets/remote/terraform-do/cluster/outputs.tf b/contrib/testnets/remote/terraform-do/cluster/outputs.tf deleted file mode 100644 index 78291b6a..00000000 --- a/contrib/testnets/remote/terraform-do/cluster/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -// The cluster name -output "name" { - value = "${var.name}" -} - -// The list of cluster instance IDs -output "instances" { - value = ["${digitalocean_droplet.cluster.*.id}"] -} - -// The list of cluster instance public IPs -output "public_ips" { - value = ["${digitalocean_droplet.cluster.*.ipv4_address}"] -} - diff --git a/contrib/testnets/remote/terraform-do/cluster/variables.tf b/contrib/testnets/remote/terraform-do/cluster/variables.tf deleted file mode 100644 index e2654b11..00000000 --- a/contrib/testnets/remote/terraform-do/cluster/variables.tf +++ /dev/null @@ -1,30 +0,0 @@ -variable "name" { - description = "The cluster name, e.g. remotenet" -} - -variable "regions" { - description = "Regions to launch in" - type = "list" - default = ["AMS2", "TOR1", "LON1", "NYC3", "SFO2", "SGP1", "FRA1"] -} - -variable "ssh_private_file" { -
description = "SSH private key filename to use to connect to the nodes" - type = "string" -} - -variable "ssh_public_file" { - description = "SSH public key filename to copy to the nodes" - type = "string" -} - -variable "instance_size" { - description = "The instance size to use" - default = "2gb" -} - -variable "servers" { - description = "Desired instance count" - default = 4 -} - diff --git a/contrib/testnets/remote/terraform-do/files/terraform.sh b/contrib/testnets/remote/terraform-do/files/terraform.sh deleted file mode 100644 index 60b4dd8e..00000000 --- a/contrib/testnets/remote/terraform-do/files/terraform.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Script to initialize a testnet settings on a server - -#Usage: terraform.sh - -#Add gaiad node number for remote identification -echo "$2" > /etc/nodeid - diff --git a/contrib/testnets/remote/terraform-do/main.tf b/contrib/testnets/remote/terraform-do/main.tf deleted file mode 100644 index fb78a378..00000000 --- a/contrib/testnets/remote/terraform-do/main.tf +++ /dev/null @@ -1,43 +0,0 @@ -#Terraform Configuration - -variable "DO_API_TOKEN" { - description = "DigitalOcean Access Token" -} - -variable "TESTNET_NAME" { - description = "Name of the testnet" - default = "remotenet" -} - -variable "SSH_PRIVATE_FILE" { - description = "SSH private key file to be used to connect to the nodes" - type = "string" -} - -variable "SSH_PUBLIC_FILE" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - -variable "SERVERS" { - description = "Number of nodes in testnet" - default = "4" -} - -provider "digitalocean" { - token = "${var.DO_API_TOKEN}" -} - -module "cluster" { - source = "./cluster" - name = "${var.TESTNET_NAME}" - ssh_private_file = "${var.SSH_PRIVATE_FILE}" - ssh_public_file = "${var.SSH_PUBLIC_FILE}" - servers = "${var.SERVERS}" -} - - -output "public_ips" { - value = "${module.cluster.public_ips}" -} - diff --git a/contrib/testnets/test_platform/README.md b/contrib/testnets/test_platform/README.md deleted file mode 100644 index a360a4d5..00000000 --- a/contrib/testnets/test_platform/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Gaiad Testnet Tool - -This python tool starts multiple gaiad instances on the same machine without virtualization, i.e., non-conflicting ports are used. - -This tool aims to simplify testing of key Cosmos Hub operations, such as module deployments and upgrades. - -## Features - -1. All ports automatically incremented by 10 -1. Gaiad nodes peer with all other nodes -1. Gaiad nodes all started on one machine without conflict -1. All nodes generate, propose, and vote on blocks -1. Stopping app stops all instances -1. Support specifying a pre-existing genesis file -1. Supports taking a pre-existing genesis file and creating a network with a sufficient number of validators. The network - creates as many validators as needed to attain majority voting power on the new network (and produce new blocks with pre-existing genesis file). - The validators that are replaced is the set that provides at least 66% of the total voting power given in the genesis file. - - **This feature allows testing upgrades and module migrations of existing networks, using their pre-existing genesis** :star: - -## Usage - -1. Configure `template/replacement_defaults.txt`: - 1. To create a network from scratch: - 1. Set `replacement_genesis` value to blank, e.g., `replacement_genesis=` - 1. Set `num_of_nodes_to_apply` to the _number of nodes to run_, e.g., `num_of_nodes_to_apply=4` - 1. 
-## Usage - -1. Configure `templates/replacement_defaults.txt`: - 1. To create a network from scratch: - 1. Set `replacement_genesis` value to blank, e.g., `replacement_genesis=` - 1. Set `num_of_nodes_to_apply` to the _number of nodes to run_, e.g., `num_of_nodes_to_apply=4` - 1. To create a network based on an existing genesis file: - 1. Set `replacement_genesis` to the source genesis file; `.tar.gz` files are also supported - 1. Set `replacement_genesis_make_safe` to `True` in order to create as many nodes as needed to run a majority of validators. - 1. Otherwise, set `replacement_genesis_make_safe` value to blank to create `num_of_nodes_to_apply` nodes, e.g., `replacement_genesis_make_safe=`. - Important: if `replacement_genesis_make_safe` is not set, then the validator keys in the genesis file aren't replaced and so the network may not produce new blocks. - 1. Optionally, set `LOG_LEVEL` to one of _(trace | debug | info | warn | error | fatal | panic)_; default _info_ -1. Start `gaiad_config_manager.py` - -Notes for `templates/replacement_defaults.txt`: - -- only the last occurrence of a key and its value is used, i.e., earlier occurrences are overwritten. -- keys ending in `_PORT` are automatically incremented for each node - -## Upcoming features - -1. custom network architectures -1. custom failure testing -1. ibc integration testing -1. module integration testing diff --git a/contrib/testnets/test_platform/gaiad_config_manager.py b/contrib/testnets/test_platform/gaiad_config_manager.py deleted file mode 100644 index 0c2693e9..00000000 --- a/contrib/testnets/test_platform/gaiad_config_manager.py +++ /dev/null @@ -1,233 +0,0 @@ -import json -import os -import shutil -import subprocess -import time - -working_directory = os.getcwd() + "/" - -template_input = working_directory + 'templates/replacement_defaults.txt' - -template_file_config = working_directory + 'templates/config.toml' -template_file_app = working_directory + 'templates/app.toml' - -target_dir = working_directory + 'mytestnet/node0/gaiad/config/' - -target_files = [] -target_configs = [] -target_apps = [] - -# port sequence parameters -port_sequence = 0 -port_increment = 10 - -# take a template -# replace template parameters -# increment ports -# set peers -# apply genesis if one is specified -# overwrite configs for each target - -# collect testnet validator pub keys -# copy genesis file to targets - -# get genesis validators info -# choose which validators to replace -# create pubkey_replacement file -# use pubkey_replacement option on start - -# read the template as a single string -with open(template_file_config, 'r') as file: - # template_config = file.read().replace('\n', '') - template_config = file.read() - -with open(template_file_app, 'r') as file: - template_app = file.read() - # .replace('\n', '') - -# populate template replacements from input file -template_replacements = {} -with open(template_input, 'r') as file: - template_lines = file.readlines() - for lin in template_lines: - lin = lin.strip() - if len(lin) == 0: - continue - line_separations = lin.split("=") - template_replacements[line_separations[0]] = line_separations[1] - - -def make_replacements(template, port_sequence): - intermediate_template = template - # make template replacements - for k in template_replacements: - # allow special care for ports and their increments - if k.endswith("_PORT"): - intermediate_template = intermediate_template.replace("<" + k + ">", str(int(template_replacements[k]) + port_sequence)) - else: - intermediate_template = intermediate_template.replace("<" + k + ">", template_replacements[k]) - return intermediate_template - - -local_sequence = 0 - - -def get_validator_pubkey(target_dir): - val_pub_key = subprocess.check_output(['gaiad', 'tendermint', 'show-validator', '--home',
target_dir.rstrip('/config')]) - val_pub_key = val_pub_key.decode("utf-8").rstrip('\n') - return val_pub_key - - -def get_validator_id(target_dir): - global local_sequence - # subprocess.call(['gaiad', 'init', '--home', target_dir]) - nodeid = subprocess.check_output(['gaiad', 'tendermint', 'show-node-id', '--home', str(target_dir).rstrip("config/")]) - peer_id = str(nodeid.decode("utf-8").rstrip('\n') + '@' + template_replacements['P2P_PEERID_IP'] + ':' + str(int(template_replacements['P2P_LADDR_PORT']) + int(local_sequence))) - local_sequence += 10 - return peer_id - - -common_genesis = working_directory + template_replacements['replacement_genesis'] -# support compressed genesis files -if common_genesis.endswith(".tar.gz"): - unzip_cmd = "tar zxvf " + common_genesis + " --cd " + working_directory + "templates" - print("unzip_cmd:" + unzip_cmd) - subprocess.call(unzip_cmd, shell=True) - common_genesis = common_genesis.rstrip(".tar.gz") - -if len(template_replacements['replacement_genesis']) > 0: - # cat genesis.cosmoshub-4.json| jq -s '.[].validators[] | { address: .address, power: .power, name: .name }' - # genesis_validator_set = subprocess.check_output(['cat ' + str(common_genesis) + " | jq -s '.[].validators[] | { address: .address, power: .power, name: .name }'"], shell=True) - genesis_validator_set = subprocess.check_output(['cat ' + str(common_genesis) + " | jq -s '.[].app_state.staking.validators[] | { address: .operator_address, power: .tokens, name: .description.moniker }'"], shell=True) - print("genesis validator set:" + str(genesis_validator_set)) - genesis_valset_python = json.loads("[" + genesis_validator_set.decode("utf-8").replace("}", "},") + "{}]") - # sort validator records by decreasing power - # genesis_valset_sorted2 = sorted(genesis_valset_python[:-1], key=lambda k: print(k)) - genesis_valset_sorted2 = sorted(genesis_valset_python[:-1], key=lambda k: -int(k["power"])) - print("sorted valset:" + str(genesis_valset_sorted2)) - - total_power = 0 - for r in genesis_valset_sorted2: - total_power += int(r["power"]) - - safe_percentage = 0.66 - safe_absolute = total_power * safe_percentage - safe_index = 0 - safe_index_scan_stop = False - - rolling_percentage = 0 - print("rolling percentage:") - for i, r in enumerate(genesis_valset_sorted2): - rolling_percentage += int(r["power"]) / total_power - print(str(i) + ":" + str(rolling_percentage)) - if rolling_percentage > safe_percentage and not safe_index_scan_stop: - safe_index_scan_stop = True - safe_index = i - print("liveness index:" + str(safe_index)) - - if len(template_replacements['replacement_genesis_make_safe']) > 0: - # gaiad testnet --keyring-backend test --v 4 - print("Creating testnet subdirectories") - subprocess.call(['rm', '-rf', working_directory + 'mytestnet']) - subprocess.call(['gaiad', 'testnet', '--keyring-backend', 'test', '--v', str(safe_index)]) - - # specify the output - for node_num in range(safe_index): - target_file = target_dir.replace("node0", "node" + str(node_num)) - target_files.append(target_file) - # subprocess.call("gaiad init node" + str(node_num) + " -o --home "+target_file.rstrip('/config'), shell=True) - subprocess.call('gaiad unsafe-reset-all --home ' + target_file.rstrip('/config'), shell=True) - - print("target_files:"+str(target_files)) - - # collect validator pubkeys for replacement - testnet_validator_pubkeys = [get_validator_pubkey(t) for t in target_files] - print('testnet validator pubkeys:' + str(testnet_validator_pubkeys)) - - output_els = [] - for v_index in 
range(safe_index): - output_els.append({ - "validator_name": genesis_valset_sorted2[v_index]["name"], - "validator_address": genesis_valset_sorted2[v_index]["address"], - "stargate_consensus_public_key": testnet_validator_pubkeys[v_index] - }) - print("replacement keys:" + str(json.dumps(output_els))) - - with open(working_directory + 'templates/validator_replacement_output.json', 'w') as f: - f.write(str(json.dumps(output_els))) - - # gaiad migrate cosmoshub_3_genesis_export.json --chain-id=cosmoshub-4 --initial-height [last_cosmoshub-3_block+1] > genesis.json - print("migration genesis:" + str(common_genesis)) - cmd_string = 'gaiad migrate ' + common_genesis + ' --chain-id cosmoshub-4 --initial-height 0 --replacement-cons-keys ' + working_directory + 'templates/validator_replacement_output.json > ' + working_directory + 'templates/genesis_replaced.json' - print("cmd_string:" + cmd_string) - subprocess.call([cmd_string], shell=True) - - # compress genesis - subprocess.call('tar zcvf ' + working_directory + 'templates/genesis_replaced.json.tar.gz --cd ' + working_directory + 'templates genesis_replaced.json', shell=True) - - common_genesis = working_directory + 'templates/genesis_replaced.json' - - # create each target's config files - for target_file in target_files: - # copy genesis if a file path to a genesis file is set - print("common_genesis:" + common_genesis) - shutil.copy2(common_genesis, target_file + 'genesis.json') -else: - # gaiad testnet --keyring-backend test --v 4 - print("Creating testnet subdirectories") - subprocess.call(['rm', '-rf', working_directory + 'mytestnet']) - - num_of_nodes_to_apply = int(template_replacements["num_of_nodes_to_apply"]) - - # specify the output - # target_files = [] - for node_num in range(num_of_nodes_to_apply): - target_files.append(target_dir.replace("node0", "node" + str(node_num))) - - subprocess.call(['gaiad', 'testnet', '--keyring-backend', 'test', '--v', str(num_of_nodes_to_apply)]) - -peer_ids = [get_validator_id(t) for t in target_files] -peers = ",".join(peer_ids) -print("testnet peer ids:" + peers) -main_template_config = template_config.replace("<PERSISTENT_PEERS>", peers) - -# collect validator pubkeys for replacement -testnet_validator_pubkeys = [get_validator_pubkey(t) for t in target_files] -print('testnet validator pubkeys:' + str(testnet_validator_pubkeys)) - - -# give the node some time to start if this is a genesis file with a lot of state; Cosmos Hub 4 mainnet requires at least 10 minutes -# time.sleep(60 * 10) -# tendermint_validator_set = subprocess.check_output(['gaiad', 'query', 'tendermint-validator-set']).decode("utf-8").rstrip('\n') -# print("tendermint_validator_set:" + tendermint_validator_set) - -for target_file in target_files: - # make replacements to app and config toml files - current_template_config = make_replacements(main_template_config, port_sequence) - target_configs.append(current_template_config) - current_template_app = make_replacements(template_app, port_sequence) - target_apps.append(current_template_app) - port_sequence += port_increment - - # backup current file, but we choose to overwrite instead - # shutil.copy2(file, file+"-"+str(time.time_ns())+".bak") - - # print(current_template_config) - # print(current_template_app) - - # make sure target path exists - os.makedirs(os.path.dirname(target_file), exist_ok=True) - - # save the config.toml production - with open(target_file + 'config.toml', 'w') as f: - f.write(current_template_config) - - # save the app.toml production - with open(target_file + 'app.toml',
'w') as f: - f.write(current_template_app) - - proc = subprocess.Popen(['gaiad', 'start', '--home', target_file.rstrip('/config'), '--x-crisis-skip-assert-invariants']) - - # automatically terminate program (and thus all gaiad instances) after some time - -time.sleep(300) diff --git a/contrib/testnets/test_platform/templates/3924406.cosmoshub-3.json.tar.gz b/contrib/testnets/test_platform/templates/3924406.cosmoshub-3.json.tar.gz deleted file mode 100644 index 8fe8f2f3..00000000 Binary files a/contrib/testnets/test_platform/templates/3924406.cosmoshub-3.json.tar.gz and /dev/null differ diff --git a/contrib/testnets/test_platform/templates/app.toml b/contrib/testnets/test_platform/templates/app.toml deleted file mode 100644 index 3005afa7..00000000 --- a/contrib/testnets/test_platform/templates/app.toml +++ /dev/null @@ -1,177 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "0.25stake" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from CometBFT. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning CometBFT blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: CometBFT block pruning is dependent on this parameter in conjunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 -
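-# Worked example (hypothetical, not a shipped default): with a ~21-day -# unbonding period and ~7s block times, about 1814400 / 7 ≈ 259200 blocks span -# the unbonding window, so an operator might set min-retain-blocks = 260000 -# to retain slightly more than that. -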
-# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs CometBFT what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enable -# other sinks such as Prometheus. -enabled = true - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 60 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ - ["chain_id", "chain-PVndZc"], -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = true - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "<API_ADDRESS>:<API_ADDRESS_PORT>" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the CometBFT RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the CometBFT RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the CometBFT maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "<GRPC_APP_ADDR>:<GRPC_APP_ADDR_PORT>" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = true - -# Address defines the gRPC-web server address to bind to. -#address = "0.0.0.0:9091" -address = "<GRPC_WEB_APP_ADDR>:<GRPC_WEB_APP_ADDR_PORT>" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height.
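-# Example (hypothetical values): a node intended to serve snapshots might set -# snapshot-interval = 1000 and snapshot-keep-recent = 2, letting peers restore -# from either of its two most recent snapshots.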
-[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 - -[wasm] -# This is the maximum sdk gas (wasm and storage) that we allow for any x/wasm "smart" queries -query_gas_limit = 300000 -# This is the number of wasm vm instances we keep cached in memory for speed-up -# Warning: this is currently unstable and may lead to crashes, best to keep at 0 unless testing locally -lru_size = 0 \ No newline at end of file diff --git a/contrib/testnets/test_platform/templates/config.toml b/contrib/testnets/test_platform/templates/config.toml deleted file mode 100644 index 6f7a0fed..00000000 --- a/contrib/testnets/test_platform/templates/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "<PROXY_APP>:<PROXY_APP_PORT>" - -# A custom human readable name for this node -moniker = "<MONIKER>" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catch up quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster in some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "<LOG_LEVEL>" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p
protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "<RPC_LADDR>:<RPC_LADDR_PORT>" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non-simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "<GRPC_LADDR>:<GRPC_LADDR_PORT>" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory.
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "<PPROF_LADDR>:<PPROF_LADDR_PORT>" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "<P2P_LADDR>:<P2P_LADDR_PORT>" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "<SEEDS>" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "<PERSISTENT_PEERS>" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "<PRIVATE_PEER_IDS>" - -# Toggle to disable guard against peers connecting from the same ip. (default is false) -allow_duplicate_ip = <ALLOW_DUPLICATE_IP> - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and
-max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 3) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "3s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie.
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "5s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = <PROMETHEUS> - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = "<PROMETHEUS_LISTEN_ADDR>:<PROMETHEUS_LISTEN_ADDR_PORT>" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited.
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/contrib/testnets/test_platform/templates/replacement_defaults.txt b/contrib/testnets/test_platform/templates/replacement_defaults.txt deleted file mode 100644 index d4224e5b..00000000 --- a/contrib/testnets/test_platform/templates/replacement_defaults.txt +++ /dev/null @@ -1,31 +0,0 @@ -replacement_genesis_make_safe=True -replacement_genesis=templates/3924406.cosmoshub-3.json -replacement_genesis=templates/3924406.cosmoshub-3.json.tar.gz -replacement_genesis= -num_of_nodes_to_apply=4 -LOG_LEVEL=debug -P2P_PEERID_IP=0.0.0.0 -P2P_LADDR=tcp://0.0.0.0 -P2P_LADDR_PORT=26656 -RPC_LADDR=tcp://0.0.0.0 -RPC_LADDR_PORT=26657 -PROXY_APP=tcp://127.0.0.1 -PROXY_APP_PORT=26658 -GRPC_LADDR=tcp://0.0.0.0 -GRPC_LADDR_PORT=26659 -GRPC_APP_ADDR=0.0.0.0 -GRPC_APP_ADDR_PORT=9090 -GRPC_WEB_APP_ADDR=0.0.0.0 -GRPC_WEB_APP_ADDR_PORT=9091 -PROMETHEUS_LISTEN_ADDR= -PROMETHEUS_LISTEN_ADDR_PORT=26660 -MONIKER=node0 -SEEDS= -PERSISTENT_PEERS= -PRIVATE_PEER_IDS= -ALLOW_DUPLICATE_IP=true -PROMETHEUS=false -API_ADDRESS=tcp://0.0.0.0 -API_ADDRESS_PORT=1317 -PPROF_LADDR=localhost -PPROF_LADDR_PORT=6060 \ No newline at end of file diff --git a/contrib/testnets/test_platform/templates/validator_replacement_example.json b/contrib/testnets/test_platform/templates/validator_replacement_example.json deleted file mode 100644 index 6e587c63..00000000 --- a/contrib/testnets/test_platform/templates/validator_replacement_example.json +++ /dev/null @@ -1,394 +0,0 @@ -[ - { - "validator_name": "01node", - "validator_address": "cosmosvaloper17mggn4znyeyg25wd7498qxl7r2jhgue8u4qjcq", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqh6xjlwnd50hm6q8j3n6znaykj0xwwy38r6qhc32tlxp3uxh7aa6ql58jar" - }, - { - "validator_name": "Alphemy Capital", - "validator_address": "cosmosvaloper16zgjnqxryhq2kftfuv8urp50x0xwt5dagemhfl", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqeumluaf7n7ss9qmaj43v4gd4dzj5jfw7cdnn6vngwd2m7gmu48lqsyy3wa" - }, - { - "validator_name": "alexandruast", - "validator_address": "cosmosvaloper1ualhu3fjgg77g485gmyswkq3w0dp7gys6qzwrv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqapkvxxn8dwvf0alz8a506qa8v7ald0su95ypnfrg24rdp7fme7gqadpucf" - }, - { - "validator_name": "ATEAM", - "validator_address": "cosmosvaloper14l0fp639yudfl46zauvv8rkzjgd4u0zk2aseys", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq7jsrkl9fgqk0wj3ahmfr8pgxj6vakj2wzn656s8pehh0zhv2w5as5gd80a" - }, - { - "validator_name": "AUDIT.one", - "validator_address": "cosmosvaloper1udpsgkgyutgsglauk9vk9rs03a3skc62gup9ny", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqwc0psjv0ta2trx3nf6zgwrhkwquws0026xv94aj7as6we9xm47yqzl7v04" - }, - { - "validator_name": "Bison Trails", - "validator_address": "cosmosvaloper1crqm3598z6qmyn2kkcl9dz7uqs4qdqnr6s8jdn", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqtqyz8x2nf5rpq4cq2g6y4rcflcxk5d4kjy5ugsqmpelaxd3kngdsmqhz9k" - }, - { - "validator_name": "blockscape", - "validator_address": "cosmosvaloper13x77yexvf6qexfjg9czp6jhpv7vpjdwwkyhe4p", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq6cp6vs95f0xwfle30wj07n0qexq7f5a0zgvz60mnjz03kl0evfwqgrne5q" - }, - { - "validator_name": "Bi23 Labs", - "validator_address": "cosmosvaloper1hvsdf03tl6w5pnfvfv5g8uphjd4wfw2h4gvnl7", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqxpdkmv2vka547zq99grw0memcg5a6jyzmf5sdhkvk57kqqa2yy7s8vcr4p" - }, - { - "validator_name": "B-Harvest", - "validator_address": 
"cosmosvaloper10e4vsut6suau8tk9m6dnrm0slgd6npe3jx5xpv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqzsr79csnz2cltwue35gnr683psg6utmd6464jzhfxrefu5rkcldq8ckjl0" - }, - { - "validator_name": "CCN", - "validator_address": "cosmosvaloper1qaa9zej9a0ge3ugpx3pxyx602lxh3ztqgfnp42", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq944ta9a9gaj0ldecyj974d48erglwvwrlqzrpmft5pc5hy298ksqarvq6j" - }, - { - "validator_name": "Cephalopod Equipment Corp", - "validator_address": "cosmosvaloper16k579jk6yt2cwmqx9dz5xvq9fug2tekvlu9qdv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq4f2zzkjr20v4gt0gy9nmdznxsvmgluyn3vnjmgqq58q9t47uyffqndgy0p" - }, - - { - "validator_name": "Certus One", - "validator_address": "cosmosvaloper1qwl879nx9t6kef4supyazayf7vjhennyh568ys", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqxle856ffpp8jt9j4wgnv20prpvf60uktsrq2fmz73xkujhvwtylqv8fnfu" - }, - { - "validator_name": "chainflow-cosmos-prodval-01", - "validator_address": "cosmosvaloper1j0vaeh27t4rll7zhmarwcuq8xtrmvqhudrgcky", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqte39vggwm75vx65grlv0w7k92eeysv35s20cfcepv5hmx5p58tusjwym4k" - }, - { - "validator_name": "Chorus One", - "validator_address": "cosmosvaloper15urq2dtp9qce4fyc85m6upwm9xul3049e02707", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqauemxajnn7vemct6p3705acwfcvr9z4saw73ek6f8kapp7z5gjeqzhvhv4" - }, - - { - "validator_name": "Citadel.one", - "validator_address": "cosmosvaloper1lzhlnpahvznwfv4jmay2tgaha5kmz5qxerarrl", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepquuaszms40adxzwkjr5ew4gj8s49dyjx5zsncar4cqfk4wh07eh8sjxjeaf" - }, - { - "validator_name": "Citizen Cosmos", - "validator_address": "cosmosvaloper1e859xaue4k2jzqw20cv6l7p3tmc378pc3k8g2u", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqehuq39q3pkvalrjqmq8uf2f0dfs07uezs78wdwmzlwhkqn09gzwq0gdsal" - }, - { - "validator_name": "Compass", - "validator_address": "cosmosvaloper1ff0dw8kawsnxkrgj7p65kvw7jxxakyf8n583gx", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqu08f7tuce8k88tgewhwer69kfvk5az3cn5lz3v8phl8gvu9nxu8qhrjxfj" - }, - { - "validator_name": "Citizen Cosmos", - "validator_address": "cosmosvaloper1e859xaue4k2jzqw20cv6l7p3tmc378pc3k8g2u", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqehuq39q3pkvalrjqmq8uf2f0dfs07uezs78wdwmzlwhkqn09gzwq0gdsal" - }, - { - "validator_name": "CoinoneNode", - "validator_address": "cosmosvaloper1te8nxpc2myjfrhaty0dnzdhs5ahdh5agzuym9v", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqjg26g27dtvjqstyqktmp4jsn98473vfz0mek2eyklfp0yqapav5szdrvpd" - }, - { - "validator_name": "Cosmic Validator", - "validator_address": "cosmosvaloper1de7qx00pz2j6gn9k88ntxxylelkazfk3g8fgh9", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqwr5p8j076mfydn7wckqz748lr0j50zwgsftnfpvgz6rz0rkvvqwqg5fyaf" - }, - { - "validator_name": "Cosmostation", - "validator_address": "cosmosvaloper1clpqr4nrk4khgkxj78fcwwh6dl3uw4epsluffn", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqc9d40a3mmx4drpkz7ywl0z4z7fj2x8475twaqj860gez5kjfuppq08cpyk" - }, - { - "validator_name": "Cypher Core", - "validator_address": "cosmosvaloper19q0mkw7jnx9k4njxqeurc0qmg3scpenm4mhtym", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqcdav5ylt2zst90qmuh8e5w07xmxv9y6wufp5k9ngzmx7v9qewqtqkcq4z8" - }, - { - "validator_name": "Dawns.World", - "validator_address": "cosmosvaloper1ktecz4dr56j9tsfh7nwg8s9suvhfu70qykhu5s", - "stargate_consensus_public_key": 
"cosmosvalconspub1zcjduepq4euv7ertqhgvxrla583fg9g6z2v2dzrkl9spche4j4r23vukmx2q8gqvev" - }, - { - "validator_name": "DelegaNetworks", - "validator_address": "cosmosvaloper1uutuwrwt3z2a5z8z3uasml3rftlpmu25aga5c6", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqarrl0ppddzyczwvcqwf3jwd9qwkhxfy6lcv8ep4msk293mlxg39qgf77y3" - }, - { - "validator_name": "DokiaCapital", - "validator_address": "cosmosvaloper14lultfckehtszvzw4ehu0apvsr77afvyju5zzy", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqr2mcvpt43a5fspxyse04c4fe8rdmg90vqv67praxt4jzqmep0myqjpq7kk" - }, - { - "validator_name": "DragonStake", - "validator_address": "cosmosvaloper15r4tc0m6hc7z8drq3dzlrtcs6rq2q9l2nvwher", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqzmcxc8tdmqsydmn8uw3qvn28frswr3kfzdy0nxm4f6z0fgqnq66qlw76kz" - }, - { - "validator_name": "Easy 2 Stake", - "validator_address": "cosmosvaloper1e0plfg475phrsvrlzw8gwppeva0zk5yg9fgg8c", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqqxp9kjnlcefa9ek8gxm9zw3dcn7kq69yquc3pq6530936a9rt04qsj8hcr" - }, - { - "validator_name": "Everstake", - "validator_address": "cosmosvaloper1tflk30mq5vgqjdly92kkhhq3raev2hnz6eete3", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqwcvy8hyw2phdp080ggj7prxv972rvqc9gwyjnl0uwf7uxn63s8vqdctdcw" - }, - { - "validator_name": "Figment Networks", - "validator_address": "cosmosvaloper1hjct6q7npsspsg3dgvzk3sdf89spmlpfdn6m9d", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq7dj6qx6rwajkd6gfj4lgsu4kcrnvrmcxhxn8el9au0kezkkx88dsp28une" - }, - { - "validator_name": "Forbole", - "validator_address": "cosmosvaloper14kn0kk33szpwus9nh8n87fjel8djx0y070ymmj", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqgtdccmpdht8s4stcym3nm6yeswxfajg5pk6469kyfy5q6jdpyxfq5lqym0" - }, - { - "validator_name": "Genesis Lab", - "validator_address": "cosmosvaloper1wdrypwex63geqswmcy5qynv4w3z3dyef2qmyna", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqx83cedysnrarkqkslc7ndqxxxh8yn2qp96qzxafavqfu8q6xttqsydcu2z" - }, - { - "validator_name": "HashQuark", - "validator_address": "cosmosvaloper1cgh5ksjwy2sd407lyre4l3uj2fdrqhpkzp06e6", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqxf9u7a6wmlcmyhwzmzhr4eae9adftl827ydmd6a7knjprxxaz48shg2wgv" - }, - { - "validator_name": "in3s.com", - "validator_address": "cosmosvaloper1rcp29q3hpd246n6qak7jluqep4v006cdsc2kkl", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq7mft6gfls57a0a42d7uhx656cckhfvtrlmw744jv4q0mvlv0dypskehfk8" - }, - { - "validator_name": "Iqlusion", - "validator_address": "cosmosvaloper1grgelyng2v6v3t8z87wu3sxgt9m5s03xfytvz7", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqmhqcl5mkke83u5l5fxxq2l40wjj6cs6jmun29kwe6d3u3ttsv55qa9933a" - }, - { - "validator_name": "IRISnet-Bianjie", - "validator_address": "cosmosvaloper1ssm0d433seakyak8kcf93yefhknjleeds4y3em", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqrgyyjxpe0ujefxwnkpmqz9m0hj03y09tdz9lwc0s7mvy469hulfq69f8sd" - }, - { - "validator_name": "Jedi-St3", - "validator_address": "cosmosvaloper1wg4s5kd0le7szxwuvvhykshdksex00n0et9qg6", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq78mlh5kmv08zlvl4m8unjdcurnxcws0sey8hwxcfsur3wyh8ppzqa7axj6" - }, - { - "validator_name": "KalpaTech", - "validator_address": "cosmosvaloper1ehkfl7palwrh6w2hhr2yfrgrq8jetgucudztfe", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq9ek5tt80qfjjecrzhsjrrf7ju8e23w9sk9lp8w8jgnfcx9uy62uqc3v94u" - }, - { - "validator_name": "kochacolaj", - 
"validator_address": "cosmosvaloper1emaa7mwgpnpmc7yptm728ytp9quamsvu837nc0", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqfuxvufupnsm7v5anpwd7z8ec70z2k209j7xclnm25zz7vauhyc5qjgxx3h" - }, - { - "validator_name": "Kytzu", - "validator_address": "cosmosvaloper1wtv0kp6ydt03edd8kyr5arr4f3yc52vp3u2x3u", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqwlrgy663rh8usd99p7j2zqqc53a58y3wr9x0nd4fvpee0cg6kcpqnjxgm3" - }, - { - "validator_name": "KysenPool Sky", - "validator_address": "cosmosvaloper146kwpzhmleafmhtaxulfptyhnvwxzlvm87hwnm", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqsxcxw7nrmkweuav4k5z2gjvf205cvymaljum6kv0sj7srn8wlveq7r5rvx" - }, - { - "validator_name": "lunamint", - "validator_address": "cosmosvaloper1ec3p6a75mqwkv33zt543n6cnxqwun37rr5xlqv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqzcdfkcypa7n4zcnng2e6ulr8y3rwa9pyj9zyfdah3ysm0euszszs4y7zwg" - }, - { - "validator_name": "melea", - "validator_address": "cosmosvaloper1zqgheeawp7cmqk27dgyctd80rd8ryhqs6la9wc", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq8lvj0ezesq704yjrz5m6qvg0f4xfgwur2tdx8y7zmmk6khv6pyjsz9txcl" - }, - { - "validator_name": "melange", - "validator_address": "cosmosvaloper1cz6q5nys0tgupq2mt99amsltepglxqgwfs6fum", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqvsla2hfkxr39fv2kscvtgeeu7qm2jw2kuxg64juwrpmymud0mcasnzpjjc" - }, - { - "validator_name": "nylira.net", - "validator_address": "cosmosvaloper13sduv92y3xdhy3rpmhakrc3v7t37e7ps9l0kpv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqv43m43y8k4vl677kcrln50hxc099rt5vwdnqsk6qz95a94t0xnlsn3d5ue" - }, - { - "validator_name": "P2P.ORG", - "validator_address": "cosmosvaloper132juzk0gdmwuxvx4phug7m3ymyatxlh9734g4w", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq8qrnv9ewmps46gyjqyzajmtay7hrj5dz7fsxh4qc7p3xnwumvrls38dv5r" - }, - { - "validator_name": "Ping Pub", - "validator_address": "cosmosvaloper1jxv0u20scum4trha72c7ltfgfqef6nsch7q6cu", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqvwa5jswyy32vm26agf42jtfsazxssxem8jkntucwkq9p6lj2szzsklqn0f" - }, - { - "validator_name": "replicator.network", - "validator_address": "cosmosvaloper1et77usu8q2hargvyusl4qzryev8x8t9wwqkxfs", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqehfdumq50qgtuqd3xqf0pz37g0vzkflnnpe62e7w02ylvvuwnvgqskhmz7" - }, - { - "validator_name": "Sentinel", - "validator_address": "cosmosvaloper1u6ddcsjueax884l3tfrs66497c7g86skn7pa0u", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq09kyxsdw0p3ammk3hglv2kjteyxecla4a84mv3tw5f3haj8kpvnslxp0mc" - }, - { - "validator_name": "SG-1", - "validator_address": "cosmosvaloper1zsq22eqgrswuhmcul2fun7ntgrzjgph3kqf3a3", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqmsgxdazcclelnxz5hj8utj0a8cr78fnvyfe0y97y2yy282p5vukqf04vmg" - }, - { - "validator_name": "sifchain.finance", - "validator_address": "cosmosvaloper1z2hjx8ae4we5vu2p4tctuj6q2vl7ucgmte422a", - "stargate_consensus_public_key": "cosmosvalconspub1addwnpepqtjj6sfrpd3wrdvnwja9mpf84avzmfrqnmjlp7t7krzr49pgtawqudfaen9" - }, - { - "validator_name": "Sikka", - "validator_address": "cosmosvaloper1ey69r37gfxvxg62sh4r0ktpuc46pzjrm873ae8", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq0d06rdx49rj2uzsze4ww82et986fhl2ahcxtj7c56pr2rmcp5ylsqzhnqt" - }, - { - "validator_name": "Simply Staking", - "validator_address": "cosmosvaloper124maqmcqv8tquy764ktz7cu0gxnzfw54n3vww8", - "stargate_consensus_public_key": 
"cosmosvalconspub1zcjduepqyngml5ydtd2d3u62sknrwfx7xk3c3z4tl784ekeneax8hpg69uvs84rvu7" - }, - { - "validator_name": "Skystar Capital", - "validator_address": "cosmosvaloper1uhnsxv6m83jj3328mhrql7yax3nge5svrv6t6c", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqzt04jd95e5dtmd0f6gc3xc8l8au8ps22cc5wnxm5chcqnyn8qs3s068afw" - }, - { - "validator_name": "Stake Capital", - "validator_address": "cosmosvaloper1k9a0cs97vul8w2vwknlfmpez6prv8klv03lv3d", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqfgpyq4xk4s96ksmkfrr7juea9kmdxkl5ht94xgpxe240743u9cvsht489p" - }, - { - "validator_name": "🐠stake.fish", - "validator_address": "cosmosvaloper1sjllsnramtg3ewxqwwrwjxfgc4n4ef9u2lcnj0", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqezmxjhv4yzemce7qul2aqzmm6gd6f6r6ruxc7kxk6tcr6w6zud3qfuhm2r" - }, - { - "validator_name": "stake.systems", - "validator_address": "cosmosvaloper1ualhu3fjgg77g485gmyswkq3w0dp7gys6qzwrv", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqn0ekk3nslmz6h5trqq6twrf0e8n3sfxk2kmgkp474yakwf5lr7rq6eqatw" - }, - { - "validator_name": "StakeWithUs", - "validator_address": "cosmosvaloper1jlr62guqwrwkdt4m3y00zh2rrsamhjf9num5xr", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqdlgkgec9wl5numexjgmkjd9ulytntwnrjp5phl2xy2c95f0r9hgqlsf8g9" - }, - { - "validator_name": "stakezone", - "validator_address": "cosmosvaloper1g2lpsy85795r2fgtrkgzk25m5wkrtl2xu2t3ya", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq5rtnxycmx2ynyj6wy6frr7c4xz4pzjn6f7wyr6ytk9ngh56lqs6qth238x" - }, - { - "validator_name": "Stakin", - "validator_address": "cosmosvaloper1fhr7e04ct0zslmkzqt9smakg3sxrdve6ulclj2", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq2vrwk0dhn0kh443jxlgscd7luknvl56h3dr0pyfglxwdnhquae9srek6ew" - }, - { - "validator_name": "Staking Fund", - "validator_address": "cosmosvaloper1000ya26q2cmh399q4c5aaacd9lmmdqp90kw2jn", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq699g9qfymy2ph9a2r5s203nh9sdvlhgk5nq0cupvm7wt0y2gpe8qpuxtf0" - }, - { - "validator_name": "Staking Facilities", - "validator_address": "cosmosvaloper1x88j7vp2xnw3zec8ur3g4waxycyz7m0mahdv3p", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqhm6gjjkwecqyfrgey96s5up7drnspnl4t3rdr79grklkg9ff6zaqnfl2dg" - }, - { - "validator_name": "syncnode", - "validator_address": "cosmosvaloper19j2hd230c3hw6ds843yu8akc0xgvdvyuz9v02v", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqwhtsh6jmaf0yykyvkyzlp22xvdm2htj9j623ktes35vpw8f3hfds83kwpn" - }, - { - "validator_name": "TomShi", - "validator_address": "cosmosvaloper10sr2e9tcw5fl4tknht7j8hzw57k5s3f4zzpv88", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq4rqxjd5fjqps9mgs3snjdmpdllsew62f2yqpt83dnw0fn4cuddrsrc6m4g" - }, - { - "validator_name": "X-Stake", - "validator_address": "cosmosvalcons1qzkmn04l39v2t7q32sycm950zj0kj9jtj46vqf", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqy567dh9a7ak0tcl8n8u9lwh0xpp8vl7wlv9gjlf5d6uj7prvnjfsyaur90" - }, - { - "validator_name": "WeStaking", - "validator_address": "cosmosvaloper1ptyzewnns2kn37ewtmv6ppsvhdnmeapvtfc9y5", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqtcsm8lp7n6ph98vd59qa9esgyuysuntww9juz5wynxrhzpspmuuq6g5pzg" - }, - { - "validator_name": "Tessellated Geometry", - "validator_address": "cosmosvaloper1fun809ksxh87nzf88yashp9ynjz6xkscrtvzvw", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq8axexx8w5wf64wufk20q083ycqgfdy50j03xg78ldzv6cu0n0xgq59sqhr" - }, - { - "validator_name": "Ztake.org", - 
"validator_address": "cosmosvaloper102ruvpv2srmunfffxavttxnhezln6fnc54at8c", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq33h3ldxapw2rte5zsryatsr92jnmjpfhz54478dxhxkqj6pj9hasqvs6xr" - }, - { - "validator_name": "Iqlusion/Dokia-Binance Replacement", - "validator_address": "cosmosvaloper156gqf9837u7d4c4678yt3rl4ls9c5vuursrrzf", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq4xreqv0nttcx5p75drnwsplry9p65g58lfcvwfg8vp8mhp7y3gtslmx3kj" - }, - { - "validator_name": "Iqlusion-Kraken Replacement", - "validator_address": "cosmosvaloper1nm0rrq86ucezaf8uj35pq9fpwr5r82clzyvtd8", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqat3p62jhqcply8ewmytamqqlhu786mg49vn8m87qrpc22h3yunrqtx28m2" - }, - { - "validator_name": "Iqlusion-Zero Knowledge Replacement", - "validator_address": "cosmosvaloper1v5y0tg0jllvxf5c3afml8s3awue0ymju89frut", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqxwgvllret5heclev4c04mmxa272vxa4vee5ptjx77faw8lpn0mmqcfa9ak" - }, - { - "validator_name": "Iqlusion-BlockPower Replacement", - "validator_address": "cosmosvaloper1rpgtz9pskr5geavkjz02caqmeep7cwwpv73axj", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqm27ul4l8v7dp2nvd69t6aycyelg03ps5jekn9nkqfrr8rx22ykmq0yj0xk" - }, - { - "validator_name": "Iqlusion-hashtower Replacement", - "validator_address": "cosmosvaloper1ma02nlc7lchu7caufyrrqt4r6v2mpsj90y9wzd", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqf7r6l2et8auspzslwmmhmtflpgea7hx68slz9qenlvvcqgt4dngqsjljtt" - }, - { - "validator_name": "Iqlusion-Houbi Replacement", - "validator_address": "cosmosvaloper12w6tynmjzq4l8zdla3v4x0jt8lt4rcz5gk7zg2", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq6mzrl98emg03hsd48p9n4d2aa57seu82umcu674yg2cwtwqjjy7sfluw76" - }, - { - "validator_name": "Iqlusion-BouBouNode Replacement", - "validator_address": "cosmosvaloper1eh5mwu044gd5ntkkc2xgfg8247mgc56fz4sdg3", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqvpfpmattev2j57fpy56u34w7cjqqersueqrs6p4jfkm072dup5dsaqk02p" - }, - { - "validator_name": "Iqlusion-Coinbase", - "validator_address": "cosmosvaloper1a3yjj7d3qnx4spgvjcwjq9cw9snrrrhu5h6jll", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqxkmktpxalx2gwn7zkw4t3dtax5up3mfv4yhgmr876wyhkfq8fndq4vdh6d" - }, - { - "validator_name": "Iqlusion/Dokia-Polychain Labs Replacement", - "validator_address": "cosmosvaloper14k4pzckkre6uxxyd2lnhnpp8sngys9m6hl6ml7", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepq72wtvxz8pn7n7qkt3up0y4t0dqmjakgvhznc4n7l6gfqxpu08kxq79kpkk" - }, - { - "validator_name": "Iqlusion/Dokia-Sparkpool-Replacement", - "validator_address": "cosmosvaloper1rwh0cxa72d3yle3r4l8gd7vyphrmjy2kpe4x72", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepquunyzaqq79rx5gd743mrna2aa3han685xjzj970xpt764jgx2hpqmmruvk" - }, - { - "validator_name": "Iqlusion -MultiChain Ventures Replacement", - "validator_address": "cosmosvaloper1vf44d85es37hwl9f4h9gv0e064m0lla60j9luj", - "stargate_consensus_public_key": "cosmosvalconspub1zcjduepqaw46pnhjcv95mslvzxv5tze36ycutnx7szwln9p38ufrkffqltms8pntmu" - } -] diff --git a/contrib/testnets/upgrade-gaiad.sh b/contrib/testnets/upgrade-gaiad.sh deleted file mode 100755 index 1f920c02..00000000 --- a/contrib/testnets/upgrade-gaiad.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# upgrade-gaiad - example make call to upgrade gaiad on a set of nodes in AWS -# WARNING: Run it from the current directory - it uses relative paths to ship the binary and the genesis.json,config.toml files - -if [ $# -ne 
1 ]; then - echo "Usage: ./upgrade-gaiad.sh <cluster-name>" - exit 1 -fi -set -eux - -export CLUSTER_NAME=$1 - -make upgrade-gaiad - diff --git a/contrib/testnets/using-cleveldb.sh b/contrib/testnets/using-cleveldb.sh deleted file mode 100644 index 6e594330..00000000 --- a/contrib/testnets/using-cleveldb.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -make install GAIA_BUILD_OPTIONS="cleveldb" - -gaiad init "t6" --home ./t6 --chain-id t6 - -gaiad unsafe-reset-all --home ./t6 - -mkdir -p ./t6/data/snapshots/metadata.db - -gaiad keys add validator --keyring-backend test --home ./t6 - -gaiad add-genesis-account $(gaiad keys show validator -a --keyring-backend test --home ./t6) 100000000stake --keyring-backend test --home ./t6 - -gaiad gentx validator 100000000stake --keyring-backend test --home ./t6 --chain-id t6 - -gaiad collect-gentxs --home ./t6 - -gaiad start --db_backend cleveldb --home ./t6 diff --git a/docs/.gitignore b/docs/.gitignore deleted file mode 100644 index b2d6de30..00000000 --- a/docs/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# Dependencies -/node_modules - -# Production -/build - -# Generated files -.docusaurus -.cache-loader - -# Misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log* diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md deleted file mode 100644 index 543ffb71..00000000 --- a/docs/DOCS_README.md +++ /dev/null @@ -1,112 +0,0 @@ - - -# Updating the docs - -If you want to open a PR on Gaia to update the documentation, please follow the guidelines in the [`CONTRIBUTING.md`](https://github.com/cosmos/gaia/tree/main/CONTRIBUTING.md). - -## Internationalization - -- Translations for documentation live in a `docs/translations/<lang>/` folder, where `<lang>` is the language code for a specific language. For example, `zh` for Chinese, `ko` for Korean, `es` for Spanish, etc. -- Each `docs/translations/<lang>/` folder must follow the same folder structure within `docs/`, but only content in the following folders needs to be translated and included in the respective `docs/translations/<lang>/` folder. -- Each `docs/translations/<lang>/` folder must also have a `README.md` that includes a translated version of both the layout and content within the root-level [`README.md`](https://github.com/cosmos/cosmos-sdk/tree/master/docs/README.md). The layout defined in the `README.md` is used to build the homepage. - -## Docs Build Workflow - -The documentation for Gaia is hosted at: - -- <https://hub.cosmos.network/> - -built from the files in this (`/docs`) directory for [main](https://github.com/cosmos/gaia/tree/main/docs). - -### How It Works - -There is a [Github Action](https://github.com/cosmos/gaia/blob/main/.github/workflows/docs.yml) -listening for changes in the `/docs` directory, on the `main` branch. Any updates to files in this directory on that branch will automatically -trigger a website deployment. Under the hood, `make build-docs` is run from the -[Makefile](https://github.com/cosmos/gaia/blob/main/Makefile) in this repo. - -## README - -The [README.md](./README.md) is also the landing page for the documentation -on the website. During the docs deployment build, the current commit is added to the bottom -of the README. - -## Links - -**NOTE:** Strongly consider the existing links - both within this directory -and to the website docs - when moving or deleting files. - -Relative links should be used nearly everywhere, having discovered and weighed the following: - -### Relative - -Where is the other file, relative to the current one?
- -- works both on GitHub and for the VuePress build -- confusing / annoying to have things like: `../../../../myfile.md` -- requires more updates when files are re-shuffled - -### Absolute - -Where is the other file, given the root of the repo? - -- works on GitHub, doesn't work for the VuePress build -- this is much nicer: `/docs/hereitis/myfile.md` -- if you move that file around, the links inside it are preserved (but not to it, of course) - -### Full - -The full GitHub URL to a file or directory. Used occasionally when it makes sense -to send users to GitHub. - -## Building Locally - -To build and serve the documentation locally, make sure you're in the `docs` directory and run the following: - -Clear `node_modules` for a clean install. This is not necessary every time. - -```bash -rm -rf node_modules -``` - -Install project dependencies - -```bash -npm install -``` - -Serve the app - -```bash -npm run serve -``` - -Then navigate to `localhost:3000` in your browser. - -To build the documentation as a static website, run `npm run build`. You will find the website in the `build` directory. - -## Search - -We are using [Algolia](https://www.algolia.com) to power full-text search. This uses a public API search-only key in the `config.js` as well as a [cosmos_network.json](https://github.com/algolia/docsearch-configs/blob/master/configs/cosmos_network.json) configuration file that we can update with PRs. - -### Update and Build the RPC docs - -1. Execute the following command at the root directory to install the swagger-ui generation tool. - - ```bash - make tools - ``` - -2. Edit the API docs - 1. Edit the API docs manually: `cmd/gaiad/swagger-ui/swagger.yaml`. - 2. Edit the API docs within the [Swagger Editor](https://editor.swagger.io/). Please refer to this [document](https://swagger.io/docs/specification/2-0/basic-structure/) for the correct structure in `.yaml`. -3. Download `swagger.yaml` and replace the old `swagger.yaml` under the `cmd/gaiad/swagger-ui` folder. -4. Compile gaiad - - ```bash - make install - ``` diff --git a/docs/README copy.md b/docs/README copy.md deleted file mode 100644 index 420d6daa..00000000 --- a/docs/README copy.md +++ /dev/null @@ -1,40 +0,0 @@ - - -# Cosmos Hub Documentation - -Welcome to the documentation of the **Cosmos Hub application: `gaia`**. - -## What is Gaia? - -- [Intro to the `gaia` software](./getting-started/what-is-gaia.md) -- [Interacting with the `gaiad` binary](./hub-tutorials/gaiad.md) - -## Join the Cosmos Hub Mainnet - -- [Install the `gaia` application](./getting-started/installation.md) -- [Set up a full node and join the mainnet](./hub-tutorials/join-mainnet.md) -- [Upgrade to a validator node](./validators/validator-setup.md) - -## Join the Cosmos Hub Public Testnet - -- [Join the testnet](./hub-tutorials/join-testnet.md) - -## Setup Your Own `gaia` Testnet - -- [Setup your own `gaia` testnet](https://github.com/cosmos/testnets/tree/master/local/previous-local-testnets/v7-theta) - -## Additional Resources - -- [Validator Resources](./validators/README.md): Contains documentation for `gaia` validators. - -- [Delegator Resources](./delegators/README.md): Contains documentation for delegators. - -- [Other Resources](./resources/README.md): Contains documentation on `gaiad`, genesis file, service providers, ledger wallets, ... - -- [Cosmos Hub Archives](./resources/archives.md): State archives of past iterations of the Cosmos Hub. - -# Contribute - -See [this file](./DOCS_README.md) for details of the build process and -considerations when making changes.
diff --git a/docs/README.md b/docs/README.md index 420d6daa..de5ac995 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,37 +4,6 @@ parent: layout: home --> -# Cosmos Hub Documentation +# GovGen Documentation -Welcome to the documentation of the **Cosmos Hub application: `gaia`**. - -## What is Gaia? - -- [Intro to the `gaia` software](./getting-started/what-is-gaia.md) - -- [Interacting with the `gaiad` binary](./hub-tutorials/gaiad.md) - -## Join the Cosmos Hub Mainnet - -- [Install the `gaia` application](./getting-started/installation.md) -- [Set up a full node and join the mainnet](./hub-tutorials/join-mainnet.md) -- [Upgrade to a validator node](./validators/validator-setup.md) - -## Join the Cosmos Hub Public Testnet - -- [Join the testnet](./hub-tutorials/join-testnet.md) - -## Setup Your Own `gaia` Testnet - -- [Setup your own `gaia` testnet](https://github.com/cosmos/testnets/tree/master/local/previous-local-testnets/v7-theta) - -## Additional Resources - -- [Validator Resources](./validators/README.md): Contains documentation for `gaia` validators. -- [Delegator Resources](./delegators/README.md): Contains documentation for delegators. -- [Other Resources](./resources/README.md): Contains documentation on `gaiad`, genesis file, service providers, ledger wallets, ... -- [Cosmos Hub Archives](./resources/archives.md): State archives of past iterations of the Cosmos Hub. - -# Contribute - -See [this file](./DOCS_README.md) for details of the build process and -considerations when making changes. +Welcome to the documentation of the **GovGen application: `govgen`**. diff --git a/docs/docs/architecture/adr/PROCESS.md b/docs/architecture/PROCESS.md similarity index 96% rename from docs/docs/architecture/adr/PROCESS.md rename to docs/architecture/PROCESS.md index a547cc29..6779a06b 100644 --- a/docs/docs/architecture/adr/PROCESS.md +++ b/docs/architecture/PROCESS.md @@ -1,7 +1,7 @@ # ADR Creation Process 1. Copy the `adr-template.md` file. Use the following filename pattern: `adr-next_number-title.md` -2. Create a draft Pull Request and solicit input from the stewarding team, if you want to get an early feedback. +2. Create a draft Pull Request and solicit input from the codeowners if you want to get early feedback. 3. Make sure that the problem, the context and a recommended solution is clear and well documented. Be sure to document alternate solution spaces and give reasons why they have been discarded. 4. Add an entry to a list in the README file [Table of Contents](./README.md#adr-table-of-contents). 5. Create a Pull Request to propose a new ADR. diff --git a/docs/docs/architecture/adr/README.md b/docs/architecture/README.md similarity index 92% rename from docs/docs/architecture/adr/README.md rename to docs/architecture/README.md index bbf11bd7..f007e649 100644 --- a/docs/docs/architecture/adr/README.md +++ b/docs/architecture/README.md @@ -7,7 +7,7 @@ parent: # Architecture Decision Records (ADR) -This is a location to record all high-level architecture decisions for new feature and module proposals in the Cosmos Hub. +This is a location to record all high-level architecture decisions for new feature and module proposals in GovGen. An Architectural Decision (**AD**) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant. An Architecturally Significant Requirement (**ASR**) is a requirement that has a measurable effect on a software system’s architecture and quality.
@@ -49,7 +49,7 @@ They are to be interpreted as described in [RFC 2119](https://datatracker.ietf.o ### Accepted -- [ADR 002: Globalfee Module](./adr-002-globalfee.md) +- n/a ### Proposed @@ -61,4 +61,4 @@ They are to be interpreted as described in [RFC 2119](https://datatracker.ietf.o ### Rejected -- [ADR 001: Interchain Accounts](./adr-001-interchain-accounts.md) +- n/a diff --git a/docs/docs/architecture/templates/adr-template.md b/docs/architecture/adr-template.md similarity index 91% rename from docs/docs/architecture/templates/adr-template.md rename to docs/architecture/adr-template.md index e49f676f..95e5a588 100644 --- a/docs/docs/architecture/templates/adr-template.md +++ b/docs/architecture/adr-template.md @@ -12,7 +12,7 @@ order: false {DRAFT | PROPOSED} Not Implemented -> Please have a look at the [PROCESS](/architecture/adr/PROCESS.md#adr-status) page. +> Please have a look at the [PROCESS](./PROCESS.md#adr-status) page. > Use DRAFT if the ADR is in a draft stage (draft PR) or PROPOSED if it's in review. ## Abstract diff --git a/docs/babel.config.js b/docs/babel.config.js deleted file mode 100644 index e00595da..00000000 --- a/docs/babel.config.js +++ /dev/null @@ -1,3 +0,0 @@ -module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], -}; diff --git a/docs/build.sh b/docs/build.sh deleted file mode 100755 index 01f7e70a..00000000 --- a/docs/build.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -# This builds the docs.cosmos.network docs using docusaurus. -# Old documentation is not migrated, but it is still available at the appropriate release tag. - -# Get the current commit hash, usually should be main. -COMMIT=$(git rev-parse HEAD) -mkdir -p ~/versioned_docs ~/versioned_sidebars - -# Build docs for each version tag in versions.json. -for version in $(jq -r .[] versions.json); do - echo ">> Building docusaurus $version docs" - (git clean -fdx && git reset --hard && git checkout $version && npm install && npm run docusaurus docs:version $version) - mv ./versioned_docs/* ~/versioned_docs/ - mv ./versioned_sidebars/* ~/versioned_sidebars/ - echo ">> Finished building docusaurus $version docs" -done - -# Build docs for $COMMIT that we started on. -echo ">> Building docusaurus main docs" -(git clean -fdx && git reset --hard && git checkout $COMMIT) -mv ~/versioned_docs ~/versioned_sidebars . -npm ci && npm run build -mv build ~/output - -echo ">> Finished building docusaurus main docs" -exit 0 \ No newline at end of file diff --git a/docs/docs/architecture/PROCESS.md b/docs/docs/architecture/PROCESS.md deleted file mode 100644 index 120f8072..00000000 --- a/docs/docs/architecture/PROCESS.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: ADR Creation Process -order: 2 ---- - -1. Copy the `adr-template.md` file. Use the following filename pattern: `adr-next_number-title.md` -2. Create a draft Pull Request and solicit input from the stewarding team if you want to get early feedback. -3. Make sure that the problem, the context and a recommended solution are clear and well documented. Be sure to document alternate solution spaces and give reasons why they have been discarded. -4. Add an entry to a list in the README file [Table of Contents](./README.md#adr-table-of-contents). -5. Create a Pull Request to propose a new ADR. - -## ADR life cycle - -ADR creation is an **iterative** process. Instead of trying to solve all decisions in a single ADR pull request, we MUST first understand the problem and collect feedback through a GitHub Issue. - -1.
Every proposal SHOULD start with a new GitHub Issue or be a result of existing Issues. The Issue should contain just a brief proposal summary. - -2. Once the motivation is validated, a GitHub Pull Request (PR) is created with a new document based on the `adr-template.md`. - -3. An ADR doesn't have to arrive on `main` with an _accepted_ status in a single PR. If the motivation is clear and the solution is sound, we SHOULD be able to merge it and keep a _proposed_ status. It's preferable to have an iterative approach rather than long-lived, unmerged Pull Requests. - -4. If a _proposed_ ADR is merged, then it should clearly document outstanding issues either in ADR document notes or in a GitHub Issue. - -5. The PR SHOULD always be merged. In the case of a faulty ADR, we still prefer to merge it with a _rejected_ status. The only time the ADR SHOULD NOT be merged is if the author abandons it. - -6. Merged ADRs SHOULD NOT be deleted. - -### ADR status - -Status has two components: - -```text -{CONSENSUS STATUS} {IMPLEMENTATION STATUS} -``` - -IMPLEMENTATION STATUS is either `Implemented` or `Not Implemented`. - -#### Consensus Status - -```mermaid -flowchart TD - A[DRAFT] --> B[PROPOSED] - B --> C[LAST CALL YYYY-MM-DD] - B --> D[ABANDONED] - C --> E[ACCEPTED or REJECTED] - E --> F[SUPERSEDED by ADR-xxx] -``` - -* `DRAFT`: [optional] an ADR which is a work in progress, not yet ready for general review. This is to present early work and get early feedback in a Draft Pull Request form. -* `PROPOSED`: an ADR covering a full solution architecture and still in review - project stakeholders haven't reached an agreement yet. -* `LAST CALL <date for the last call>`: [optional] a clear notice that we are close to accepting updates. Changing the status to `LAST CALL` means that social consensus (of Cosmos SDK maintainers) has been reached and we still want to give it time to let the community react or analyze. -* `ACCEPTED`: an ADR which represents a currently implemented or to-be-implemented architecture design. -* `REJECTED`: an ADR can go from PROPOSED or ACCEPTED to REJECTED if project stakeholders decide so. -* `SUPERSEDED by ADR-xxx`: an ADR which has been superseded by a new ADR. -* `ABANDONED`: the ADR is no longer pursued by the original authors. - -## Language used in ADR - -* The context/background should be written in the present tense. -* Avoid using the first person. diff --git a/docs/docs/architecture/README.md b/docs/docs/architecture/README.md deleted file mode 100644 index e9a5f750..00000000 --- a/docs/docs/architecture/README.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Architecture Decision Records (ADR) -order: 1 ---- - -This is a location to record all high-level architecture decisions for new feature and module proposals in the Cosmos Hub. - -An Architectural Decision (**AD**) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant. -An Architecturally Significant Requirement (**ASR**) is a requirement that has a measurable effect on a software system’s architecture and quality. -An Architectural Decision Record (**ADR**) captures a single AD, such as often done when writing personal notes or meeting minutes; the collection of ADRs created and maintained in a project constitute its decision log. All these are within the topic of Architectural Knowledge Management (AKM). - -You can read more about the ADR concept [here](https://adr.github.io/).
- -## Rationale - -ADRs are intended to be the primary mechanism for proposing new feature designs and new processes, for collecting community input on an issue, and for documenting the design decisions. -An ADR should provide: - -- Context on the relevant goals and the current state -- Proposed changes to achieve the goals -- Summary of pros and cons -- Discarded solution spaces and why they were discarded -- References -- Changelog - -Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and -justification for a change in architecture, or for the architecture of something -new. The spec is a much more compressed and streamlined summary of everything as -it stands today. - -If recorded decisions turn out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. - -## Creating a new ADR - -Read about the [PROCESS](./PROCESS.md). - -### Use RFC 2119 Keywords - -When writing ADRs, follow the same best practices for writing RFCs. -When writing RFCs, key words are used to signify the requirements in the specification. -These words are often capitalized: "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL". -They are to be interpreted as described in [RFC 2119](https://datatracker.ietf.org/doc/html/rfc2119). - -## ADR Table of Contents - -### Accepted - -- [ADR 002: Globalfee Module](./adr/adr-002-globalfee.md) - -### Proposed - -- n/a - -### Draft - -- n/a - -### Rejected - -- [ADR 001: Interchain Accounts](./adr/adr-001-interchain-accounts.md) diff --git a/docs/docs/architecture/_category_.json b/docs/docs/architecture/_category_.json deleted file mode 100644 index ea83e76f..00000000 --- a/docs/docs/architecture/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "ADRs", - "position": 10, - "link": { "type": "doc", "id": "architecture/README" } -} \ No newline at end of file diff --git a/docs/docs/architecture/adr/_category_.json b/docs/docs/architecture/adr/_category_.json deleted file mode 100644 index 4fea2fa7..00000000 --- a/docs/docs/architecture/adr/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Current ADRs", - "position": 1, - "link": null -} \ No newline at end of file diff --git a/docs/docs/architecture/adr/adr-001-interchain-accounts.md b/docs/docs/architecture/adr/adr-001-interchain-accounts.md deleted file mode 100644 index 30f0b57a..00000000 --- a/docs/docs/architecture/adr/adr-001-interchain-accounts.md +++ /dev/null @@ -1,94 +0,0 @@ - - -# ADR 001: Interchain Accounts - -## Changelog - -- 2022-01-19: init -- 2022-02-04: added content -- 2023-06-28: mark as rejected - -## Status - -REJECTED Not Implemented - -**Reason:** The IBC team decided to integrate this functionality directly into their codebase and maintain it, because multiple users require it. - -## Abstract - -This is the Core Interchain Accounts Module. It allows the Cosmos Hub to act as a host chain with interchain accounts that are controlled by external IBC-connected "Controller" blockchains. Candidate chains include Umee, Quicksilver, and Sommelier. It is also a necessary component for an Authentication Module that allows the Cosmos Hub to act as a Controller chain as well. This will be recorded in a separate ADR. - -## Rationale - -This allows the Hub to participate in advanced cross-chain DeFi operations, like Liquid Staking and various protocol controlled value applications.
- -## Desired Outcome - -The hub can be used trustlessly as a host chain in the configuration of Interchain Accounts. - -## Consequences - -There has been preliminary work done to understand whether this affects the security of the Cosmos Hub. One thought was that this capability is similar to contract-to-contract interactions which are possible on virtual machine blockchains like EVM chains. Those interactions introduced a new attack vector, the re-entrancy bug, which was the culprit in "The DAO" hack on Ethereum. We believe there is no risk of these kinds of attacks with Interchain Accounts, because they require the interactions to be atomic, while Interchain Accounts are asynchronous. - -#### Backwards Compatibility - -This is the first of its kind. - -#### Forward Compatibility - -There are future releases of Interchain Accounts which are expected to be backwards compatible. - -## Technical Specification - -[ICS-27 Spec](https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md) - -## Development - -- Integration requirements - - Development has occurred in [IBC-go](https://github.com/cosmos/ibc-go) and progress is tracked on the project board there. -- Testing (Simulations, Core Team Testing, Partner Testing) - - Simulations and the Core Team tested this module -- Audits (Internal Dev review, Third-party review, Bug Bounty) - - An internal audit, an audit from Informal Systems, and an audit from Trail of Bits all took place, with fixes made to all findings. -- Networks (Testnets, Productionnets, Mainnets) - - Testnets - -## Governance [optional] - -- **Needs Signaling Proposal** -- Core Community Governance - - N/A -- Steering Community - - N/A. Possibly Aditya Srinpal, Sean King, Bez? -- Timelines & Roadmap - - Expected to be released as part of IBC 3.0 in Feb 2022 (currently in [beta release](https://github.com/cosmos/ibc-go/releases/tag/v3.0.0-beta1)) - -## Project Integrations [optional] - -- Gaia Integrations - - [PR](https://github.com/cosmos/gaia/pull/1150) -- Integration Partner - - IBC Team - -#### Downstream User Impact Report - -(Needs to be created) - -#### Upstream Partner Impact Report - -(Needs to be created) - -#### Inter-module Dependence Report - -(Needs to be created) - -## Support - -[Documentation](https://ibc.cosmos.network/main/apps/interchain-accounts/overview.html) - -## Additional Research & References - -- [Why Interchain Accounts Change Everything for Cosmos Interoperability](https://medium.com/chainapsis/why-interchain-accounts-change-everything-for-cosmos-interoperability-59c19032bf11) -- [Interchain Account Auth Module Demo Repo](https://github.com/cosmos/interchain-accounts) \ No newline at end of file diff --git a/docs/docs/architecture/adr/adr-002-globalfee.md b/docs/docs/architecture/adr/adr-002-globalfee.md deleted file mode 100644 index 0647da5e..00000000 --- a/docs/docs/architecture/adr/adr-002-globalfee.md +++ /dev/null @@ -1,165 +0,0 @@ -# ADR 002: Globalfee Module - -## Changelog -* 2023-06-12: Initial Draft - -## Status -ACCEPTED Implemented - -## Context - -The globalfee module was created to manage a parameter called `MinimumGasPricesParam`, which sets a network-wide minimum fee requirement. The intention was to stop random denominations from entering fee collections and to reduce the time validators take to check a long list of transaction fees.
To address scenarios where no fee payment is required but the denominations of volunteered paid fees are still restricted, zero-value coins were introduced as a means of limiting the allowed denoms. Nevertheless, the initial version of the globalfee module had some issues: - -- In the globalfee module, several Cosmos SDK coins methods were redefined because of the allowance of zero-value coins in the `MinimumGasPricesParam`. The `MinimumGasPricesParam` is of `sdk.DecCoins` type. In the Cosmos SDK, `sdk.DecCoins` are [sanitized](https://github.com/cosmos/cosmos-sdk/blob/67f04e629623d4691c4b2e48806f7793a3aa211e/types/dec_coin.go#L160-L177) to remove zero-value coins. As a result, several methods from `sdk.Coins` were [redefined in the Gaia fee antehandler](https://github.com/cosmos/gaia/blob/890ab3aa2e5788537b0d2ebc9bafdc968340e0e5/x/globalfee/ante/fee_utils.go#L46-L104). - -- `BypassMinFeeMsgTypes` exists in `app.toml`, which means each node can define its own value. Thus, it's not clear whether a transaction containing bypass messages will be exempted from paying a fee. - -- The fee check logic is only executed in `CheckTx`. This could enable malicious validators to change the fee check code and propose transactions that do not meet the fee requirement. - -## Decision -To fix these problems, the following changes are added to the globalfee module: -- **ZeroCoins in `MinimumGasPricesParam`:**\ -Refactor the fee check logic to use the Cosmos SDK coins' methods instead of the redefined methods. -- **Bypass Message Types:**\ -`BypassMinFeeMsgTypes` is refactored to be a param of the globalfee module, in order to make the bypass messages deterministic. -- **Check Fees in `DeliverTx`:**\ -The fee check is refactored to execute in both `DeliverTx` and `CheckTx`. This is to prevent malicious validators from changing the fee check logic and allowing arbitrary transactions to pass the fee check. As a consequence, `MinimumGasPricesParam` is introduced as a globalfee param. - -### ZeroCoins in `MinimumGasPricesParam` -#### Coins Split -`CombinedFeeRequirement` refers to the fee requirement that takes into account both `globalFees` (`MinimumGasPricesParam` in the globalfee module) and `localFees` (`minimum-gas-prices` in `app.toml`). This requirement is calculated as the maximum value between `globalFees` and `localFees` for each denomination that exists in `globalFees`. -The allowance of zero coins in the `MinimumGasPricesParam` within the globalfee module implies that `CombinedFeeRequirement(globalFees, localFees)` also permits zero coins. Therefore, the `CombinedFeeRequirement` doesn't meet the requirements of certain `sdk.Coins` methods. For instance, the `DenomsSubsetOf` method requires coins that do not contain zero coins. - -To address this issue, the `CombinedFeeRequirement` and `feeCoins` are split as shown in the chart below. - -```mermaid ---- -title: Fee Requirements and Fee Splits ---- -flowchart TD - subgraph feeReq - A[CombinedFeeRequirement]-->B[/Split zero/nonzero coins/] - B-->|zero coins| C[zeroCoinFeesDenomReq]; - B-->|nonzero coins| D[nonzeroCoinFeesDenomReq]; - - end - - subgraph feeCoin - E[feeCoins]-->F[/Split by the denoms in zero/nonzero CoinFeesDenomReq/] - F-->|denoms in zeroCoinFeesDenomReq set| G[feeCoinsZeroDenom] - F-->|denoms in nonzeroCoinFeesDenomReq set| H[feeCoinsNonZeroDenom] - end -``` - -The `CombinedFeeRequirement` is split into zero and non-zero coins, forming `nonZeroCoinFeesReq` and `zeroCoinFeesDenomReq`.
Similarly, the paid fees (feeCoins) are split into `feeCoinsNonZeroDenom` and `feeCoinsZeroDenom`, based on the denominations of `nonZeroCoinFeesReq` and `zeroCoinFeesDenomReq`, as shown in the following code snippet. - -```go - nonZeroCoinFeesReq, zeroCoinFeesDenomReq := getNonZeroFees(feeRequired) - - // feeCoinsNonZeroDenom contains non-zero denominations from the feeRequired - // feeCoinsNonZeroDenom is used to check if the fees meet the requirement imposed by nonZeroCoinFeesReq - // when feeCoins does not contain zero coins' denoms in feeRequired - feeCoinsNonZeroDenom, feeCoinsZeroDenom := splitCoinsByDenoms(feeCoins, zeroCoinFeesDenomReq) - -``` -#### Fee Checks -The workflow of the fee check is shown below: -```mermaid ---- -title: Fee Check ---- -flowchart TD - -A[feeCoinsNonZeroDenom]-->B[/DenomsSubsetOf_nonZeroCoinFeesReq/]; -B-->|yes|C[is_bypass_msg]; -B-->|no|reject1((reject)); - -C-->|yes|pass1((pass)); -C-->|no|D[/contain_zeroCoinFeesDenomReq_denom/]; - -D-->|yes|pass2((pass)); -D-->|no|E[/feeCoinsZeroDenom_nonEmpty/]; - - -E-->|yes|pass3((pass)); -E-->|no|F[/IsAnyGTE_nonZeroCoinFeesDenomReq/]; - -F-->|yes|pass4((pass)); -F-->|no|reject2((reject)); -``` - -The split enables checking `feeCoinsNonZeroDenom` against `nonZeroCoinFeesReq`, and `feeCoinsZeroDenom` against -`zeroCoinFeesDenomReq` (as shown in the following code snippet). In the check of `feeCoinsNonZeroDenom` against `nonZeroCoinFeesReq`, the Cosmos SDK coins' methods can be used since zero coins are removed from the `nonZeroCoinFeesReq`, while in the check of `feeCoinsZeroDenom` against `zeroCoinFeesDenomReq`, only the denoms need to be checked. - -Checking `feeCoinsNonZeroDenom` against `nonZeroCoinFeesReq`: -```go - if !feeCoinsNonZeroDenom.IsAnyGTE(nonZeroCoinFeesReq) { - return ctx, sdkerrors.Wrapf(sdkerrors.ErrInsufficientFee, "insufficient fees; got: %s required: %s", feeCoins.String(), feeRequired.String()) - } -``` - -Here is an example of how the coins are split and checked in the fee antehandler:\ -**assumption**:\ -`globalfee=[1photon, 0uatom, 1stake]` and `local min-gas-prices=[0.5stake]` - -**fee requirement**:\ -`combinedFeeRequirement=[1photon, 0uatom, 1stake]` - -**split fee requirement**:\ -the `combinedFeeRequirement` is split into `zeroCoinFeesDenomReq=[0uatom]` and `nonZeroCoinFeesReq=[1photon, 1stake]` - -**split the paid fees**:\ -if `paidFee=[1uatom, 0.5photon]`, -the `splitCoinsByDenoms` splits the paidFee into `feeCoinsZeroDenom=[1uatom]` (the same denom as the zero coin in `combinedFeeRequirement`), and `feeCoinsNonZeroDenom=[0.5photon]`; -then `feeCoinsZeroDenom=[1uatom]` is checked against the denoms of `zeroCoinFeesDenomReq=[0uatom]`, and `feeCoinsNonZeroDenom=[0.5photon]` is checked against `nonZeroCoinFeesReq=[1photon, 1stake]`. - -Please note that `feeCoins` does not contain zero coins. The fee coins are split according to the denoms in `zeroCoinFeesDenomReq` or `nonZeroCoinFeesDenomReq`. If feeCoins contains coins that are in neither `zeroCoinFeesDenomReq` nor `nonZeroCoinFeesDenomReq`, the transaction should be rejected. On the contrary, if feeCoins' denoms are in either `zeroCoinFeesDenomReq` or `nonZeroCoinFeesDenomReq`, and `len(zeroCoinFeesDenomReq)!=0`, the transaction can pass directly; otherwise, the fee amount needs to be checked. A minimal sketch of the splitting step is shown after the next subsection. - - -### Bypass Message Types -`BypassMinFeeMsgTypes` was a setting in `config/app.toml` before the refactor. `BypassMinFeeMsgTypes` is refactored to be a param of the globalfee module in order to reach network-level agreement. Correspondingly, `MaxTotalBypassMinFeeMsgGasUsage` is also introduced as a globalfee param.
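To make the coins split from the previous subsection concrete, here is a minimal sketch of the splitting step, assuming the zero-coin denoms of the fee requirement have already been collected into a set. The set argument and exact signature are illustrative; the actual gaia helper derives them from `CombinedFeeRequirement`.

```go
import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// splitCoinsByDenoms (sketch): route each paid fee coin by whether its denom
// appears among the zero-coin denoms of the combined fee requirement.
func splitCoinsByDenoms(feeCoins sdk.Coins, zeroDenoms map[string]struct{}) (feeCoinsNonZeroDenom, feeCoinsZeroDenom sdk.Coins) {
	for _, coin := range feeCoins {
		if _, found := zeroDenoms[coin.Denom]; found {
			feeCoinsZeroDenom = feeCoinsZeroDenom.Add(coin)
		} else {
			feeCoinsNonZeroDenom = feeCoinsNonZeroDenom.Add(coin)
		}
	}
	return feeCoinsNonZeroDenom, feeCoinsZeroDenom
}
```

Applied to the worked example above, `zeroDenoms={uatom}` routes `1uatom` into `feeCoinsZeroDenom` and `0.5photon` into `feeCoinsNonZeroDenom`.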
- -### Fee Checks in `DeliverTx` -Implementing fee checks within the `DeliverTx` function introduces a few requirements: -- **Deterministic Minimum Fee Requirement**: For the `DeliverTx` process, it is essential to have a deterministic minimum fee requirement. In `CheckTx`, the fee is checked against `CombinedFeeRequirement(globalFees, localFees)`, which considers both `minimum-gas-prices` from `config/app.toml` and `MinimumGasPricesParam` from the globalfee Params (for more details, see [globalfee.md](/modules/globalfee.md)). `CombinedFeeRequirement` contains a non-deterministic part: `minimum-gas-prices` from `app.toml`. Therefore, `CombinedFeeRequirement` cannot be used in `DeliverTx`. In `DeliverTx`, only `MinimumGasPricesParam` in the globalfee Params is used for fee verification. The code implementation is shown below. - -```go -func (mfd FeeDecorator) GetTxFeeRequired(ctx sdk.Context, tx sdk.FeeTx) (sdk.Coins, error) { - // Get required global fee min gas prices - // Note that it should never be empty since its default value is set to coin={"StakingBondDenom", 0} - globalFees, err := mfd.GetGlobalFee(ctx, tx) - if err != nil { - return sdk.Coins{}, err - } - - // In DeliverTx, the global fee min gas prices are the only tx fee requirements. - if !ctx.IsCheckTx() { - return globalFees, nil - } - - // In CheckTx mode, the local and global fee min gas prices are combined - // to form the tx fee requirements - - // Get local minimum-gas-prices - localFees := GetMinGasPrice(ctx, int64(tx.GetGas())) - - // Return combined fee requirements - return CombinedFeeRequirement(globalFees, localFees) -} -``` - -- **Deterministic Bypass Parameters**: The decision of whether a message can bypass the minimum fee has to be deterministic as well. To ensure this, the `BypassMinFeeMsgTypes` and `MaxTotalBypassMinFeeMsgGasUsage` parameters are moved to a persistent store. - -- **Module Initialization Order**: The genutils module must be initialized before the globalfee module. This is because `DeliverGenTxs` in the genutils module is called during `initGenesis`. This function executes `DeliverTx`, which subsequently calls the AnteHandle in FeeDecorator, triggering the fee check in `DeliverTx`. - To prevent the transactions delivered by `DeliverGenTxs` from going through the fee check, the initialization of the globalfee module should occur after the genutils module (a wiring sketch follows the Consequences section below). This sequencing ensures that all necessary components are in place when the fee check occurs. See [Gaia Issue #2489](https://github.com/cosmos/gaia/issues/2489) for more context. - -## Consequences -### Positive -This refactor results in code that is easier to maintain. It prevents malicious validators from escaping fee checks and makes the bypass messages work at the network level. -### Negative -The introduction of FeeDecorator has replaced the usage of `MempoolFeeDecorator` in the Cosmos SDK. Currently, if both FeeDecorator and MempoolFeeDecorator are added to the AnteDecorator chain, it will result in redundant checks. However, there's potential for FeeDecorator and MempoolFeeDecorator to become incompatible in the future, depending on updates to the Cosmos SDK.
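As a sketch of the wiring mentioned in the Module Initialization Order note above, the ordering constraint can be expressed through the SDK module manager. This is illustrative only: the surrounding module list is elided, and the globalfee module name is written as a string literal rather than imported from gaia.

```go
import (
	"github.com/cosmos/cosmos-sdk/types/module"
	genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
)

// setInitGenesisOrder sketches the required ordering: genutil (which runs
// DeliverGenTxs) is initialized before globalfee, so gentxs are delivered
// before the global fee requirements take effect.
func setInitGenesisOrder(mm *module.Manager) {
	mm.SetOrderInitGenesis(
		// ... all other module names ...
		genutiltypes.ModuleName, // "genutil": delivers gentxs during InitGenesis
		"globalfee",             // initialized after genutil, per the note above
	)
}
```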
- - -## References - -* [Documentation of the globalfee module](/modules/globalfee.md) diff --git a/docs/docs/architecture/templates/_category_.json b/docs/docs/architecture/templates/_category_.json deleted file mode 100644 index 54bfe102..00000000 --- a/docs/docs/architecture/templates/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "ADR Templates", - "position": 2, - "link": null -} \ No newline at end of file diff --git a/docs/docs/client/_category_.json b/docs/docs/client/_category_.json deleted file mode 100644 index 9bc3b236..00000000 --- a/docs/docs/client/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Client", - "position": 10, - "link": null -} \ No newline at end of file diff --git a/docs/docs/client/gaia/globalfee/v1beta1/query.swagger.json b/docs/docs/client/gaia/globalfee/v1beta1/query.swagger.json deleted file mode 100644 index 9f1da5b5..00000000 --- a/docs/docs/client/gaia/globalfee/v1beta1/query.swagger.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "gaia/globalfee/v1beta1/query.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/gaia/globalfee/v1beta1/params": { - "get": { - "operationId": "Params", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/gaia.globalfee.v1beta1.QueryParamsResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/grpc.gateway.runtime.Error" - } - } - }, - "tags": [ - "Query" - ] - } - } - }, - "definitions": { - "cosmos.base.v1beta1.DecCoin": { - "type": "object", - "properties": { - "denom": { - "type": "string" - }, - "amount": { - "type": "string" - } - }, - "description": "DecCoin defines a token with a denomination and a decimal amount.\n\nNOTE: The amount field is an Dec which implements the custom method\nsignatures required by gogoproto." - }, - "gaia.globalfee.v1beta1.Params": { - "type": "object", - "properties": { - "minimum_gas_prices": { - "type": "array", - "items": { - "$ref": "#/definitions/cosmos.base.v1beta1.DecCoin" - }, - "title": "minimum_gas_prices stores the minimum gas price(s) for all TX on the chain.\nWhen multiple coins are defined then they are accepted alternatively.\nThe list must be sorted by denoms asc. No duplicate denoms or zero amount\nvalues allowed. For more information see\nhttps://docs.cosmos.network/main/modules/auth#concepts" - }, - "bypass_min_fee_msg_types": { - "type": "array", - "items": { - "type": "string" - }, - "description": "bypass_min_fee_msg_types defines a list of message type urls\nthat are free of fee charge." - }, - "max_total_bypass_min_fee_msg_gas_usage": { - "type": "string", - "format": "uint64", - "description": "max_total_bypass_min_fee_msg_gas_usage defines the total maximum gas usage\nallowed for a transaction containing only messages of types in bypass_min_fee_msg_types\nto bypass fee charge." - } - }, - "description": "Params defines the set of module parameters." - }, - "gaia.globalfee.v1beta1.QueryParamsResponse": { - "type": "object", - "properties": { - "params": { - "$ref": "#/definitions/gaia.globalfee.v1beta1.Params" - } - }, - "description": "QueryMinimumGasPricesResponse is the response type for the\nQuery/MinimumGasPrices RPC method." 
- }, - "google.protobuf.Any": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "grpc.gateway.runtime.Error": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/google.protobuf.Any" - } - } - } - } - } -} diff --git a/docs/docs/delegators/README.md b/docs/docs/delegators/README.md deleted file mode 100644 index 019feefa..00000000 --- a/docs/docs/delegators/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Delegators -order: 1 ---- - -This folder contains documentation relevant to delegators of the Cosmos Hub and other `gaia` blockchains. - -- [Delegator CLI Guide](./delegator-guide-cli.md) -- [Delegators FAQ](./delegator-faq.md) -- [Delegator Security Notice](./delegator-security.md) diff --git a/docs/docs/delegators/_category_.json b/docs/docs/delegators/_category_.json deleted file mode 100644 index 3e1512df..00000000 --- a/docs/docs/delegators/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Delegators", - "position": 4, - "link": { "type": "doc", "id": "delegators/README" } -} \ No newline at end of file diff --git a/docs/docs/delegators/delegator-faq.md b/docs/docs/delegators/delegator-faq.md deleted file mode 100644 index 63cc8d87..00000000 --- a/docs/docs/delegators/delegator-faq.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Delegator FAQ -order: 4 ---- - -## What is a delegator? - -People who cannot or do not want to operate [validator nodes](/validators/overview.mdx) can still participate in the staking process as delegators. Indeed, validators are not chosen based on their self-delegated stake but based on their total stake, which is the sum of their self-delegated stake and of the stake that is delegated to them. This is an important property, as it makes delegators a safeguard against validators that exhibit bad behavior. If a validator misbehaves, their delegators will move their Atoms away from them, thereby reducing their stake. Eventually, if a validator's stake falls outside the top 180 addresses with the highest stake, they will exit the validator set. - -**Delegators share the revenue of their validators, but they also share the risks.** In terms of revenue, validators and delegators differ in that validators can apply a commission on the revenue that goes to their delegators before it is distributed. This commission is known to delegators beforehand and can only change according to predefined constraints (see the [section](#choosing-a-validator) below). In terms of risk, delegators' Atoms can be slashed if their validator misbehaves. For more, see the [Risks](#risks) section. - -To become delegators, Atom holders need to send a ["Delegate transaction"](./delegator-guide-cli.md#sending-transactions) where they specify how many Atoms they want to bond and to which validator. A list of validator candidates will be displayed in Cosmos Hub explorers. Later, if a delegator wants to unbond part or all of their stake, they need to send an "Unbond transaction". From there, the delegator will have to wait 3 weeks to retrieve their Atoms. Delegators can also send a "Rebond Transaction" to switch from one validator to another, without having to go through the 3-week waiting period. - -For a practical guide on how to become a delegator, click [here](./delegator-guide-cli.md).
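As a quick orientation before the full CLI guide, the three transaction types mentioned above map to `gaiad` staking subcommands. This is a sketch with placeholder addresses, amounts, and key name; additional flags such as fees and chain ID are omitted here and covered in the guide.

```bash
# Delegate: bond 1 ATOM (1000000uatom) to a chosen validator
gaiad tx staking delegate <validator-operator-address> 1000000uatom --from <yourKeyName>

# Rebond/redelegate: switch validators without the 3-week waiting period
gaiad tx staking redelegate <src-validator-address> <dst-validator-address> 1000000uatom --from <yourKeyName>

# Unbond: the Atoms become transferable again after the unbonding period
gaiad tx staking unbond <validator-operator-address> 1000000uatom --from <yourKeyName>
```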
- -## Choosing a validator - -In order to choose their validators, delegators have access to a range of information directly in [Lunie](https://lunie.io) or other Cosmos block explorers. - -- **Validator's moniker**: Name of the validator candidate. -- **Validator's description**: Description provided by the validator operator. -- **Validator's website**: Link to the validator's website. -- **Initial commission rate**: The commission rate on revenue charged to any delegator by the validator (see below for more detail). -- **Commission max change rate:** The maximum daily increase of the validator's commission. This parameter cannot be changed by the validator operator. -- **Maximum commission:** The maximum commission rate this validator candidate can charge. This parameter cannot be changed by the validator operator. -- **Validator self-bond amount**: A validator with a high amount of self-delegated Atoms has more skin-in-the-game than a validator with a low amount. - -## Directives of delegators - -Being a delegator is not a passive task. Here are the main directives of a delegator: - -- **Perform careful due diligence on validators before delegating.** If a validator misbehaves, part of their total stake, which includes the stake of their delegators, can be slashed. Delegators should therefore carefully select validators they think will behave correctly. -- **Actively monitor their validator after having delegated.** Delegators should ensure that the validators they delegate to behave correctly, meaning that they have good uptime, do not double sign or get compromised, and participate in governance. They should also monitor the commission rate that is applied. If a delegator is not satisfied with their validator, they can unbond or switch to another validator (Note: Delegators do not have to wait for the unbonding period to switch validators. Rebonding takes effect immediately). -- **Participate in governance.** Delegators can and are expected to actively participate in governance. A delegator's voting power is proportional to the size of their bonded stake. If a delegator does not vote, they will inherit the vote of their validator(s). If they do vote, they override the vote of their validator(s). Delegators therefore act as a counterbalance to their validators. - -## Revenue - -Validators and delegators earn revenue in exchange for their services. This revenue is given in two forms: - -- **Block provisions (Atoms):** They are paid in newly created Atoms. Block provisions exist to incentivize Atom holders to stake. The yearly inflation rate is calculated to target 2/3 bonded stake. If the total bonded stake in the network is less than 2/3 of the total Atom supply, inflation increases until it reaches 20%. If the total bonded stake is more than 2/3 of the Atom supply, inflation decreases until it reaches 7%. This means that if total bonded stake stays less than 2/3 of the total Atom supply for a prolonged period of time, unbonded Atom holders can expect their Atom value to deflate by 20% (compounded) per year. -- **Transaction fees (various tokens):** Each transfer on the Cosmos Hub comes with transaction fees. These fees can be paid in any currency that is whitelisted by the Hub's governance. Fees are distributed to bonded Atom holders in proportion to their stake. The first whitelisted token at launch is the ATOM. - -## Validator Commission - -Each validator receives revenue based on their total stake.
Before this revenue is distributed to delegators, the validator can apply a commission. In other words, delegators have to pay a commission to their validators on the revenue they earn. Let us look at a concrete example: - -We consider a validator whose stake (i.e. self-delegated stake + delegated stake) is 10% of the total stake of all validators. This validator has 20% self-delegated stake and applies a commission of 10%. Now let us consider a block with the following revenue: - -- 990 Atoms in block provisions -- 10 Atoms in transaction fees. - -This amounts to a total of 1000 Atoms to be distributed among all staking pools. - -Our validator's staking pool represents 10% of the total stake, which means the pool obtains 100 Atoms. Now let us look at the internal distribution of revenue: - -- Commission = `10% * 80% * 100` Atoms = 8 Atoms -- Validator's revenue = `20% * 100` Atoms + Commission = 28 Atoms -- Delegators' total revenue = `80% * 100` Atoms - Commission = 72 Atoms - -Then, each delegator in the staking pool can claim their portion of the delegators' total revenue. - -## Liquid Staking - -The Liquid Staking module enacts a safety framework and associated governance-controlled parameters to regulate the adoption of liquid staking. - -The LSM mitigates liquid staking risks by limiting the total amount of ATOM that can be liquid staked to a percentage of all staked ATOM. As an additional risk-mitigation feature, the LSM introduces a requirement that validators self-bond ATOM to be eligible for delegations from liquid staking providers or to be eligible to mint LSM tokens. This mechanism is called the “validator bond”, and is technically distinct from the current self-bond mechanism, but functions similarly. - -At the same time, the LSM enables users to instantly liquid stake their already-staked ATOM, without having to wait the twenty-one-day unbonding period. This is important because a very large portion of the ATOM supply is currently staked. Liquid staking ATOM that is already staked incurs a switching cost in the form of three weeks’ forfeited staking rewards. The LSM eliminates this switching cost. - -A user would be able to visit any liquid staking provider that has integrated with the LSM and click a button to convert their staked ATOM to liquid staked ATOM. It would be as easy as liquid staking unstaked ATOM. - -Technically speaking, this is accomplished by using something called an “LSM share.” Using the liquid staking module, a user can tokenize their staked ATOM and turn it into LSM shares. LSM shares can be redeemed for underlying staked tokens and are transferable. After staked ATOM is tokenized, it can be immediately transferred to a liquid staking provider in exchange for liquid staking tokens - without having to wait for the unbonding period. - -### Toggling the ability to tokenize shares - -Currently, the liquid staking module facilitates the immediate conversion of staked assets into liquid staked tokens. Despite the many benefits that come with this capability, it does inadvertently negate a protective measure available via traditional staking, where an account can stake their tokens to render them illiquid in the event that their wallet is compromised (the attacker would first need to unbond, then transfer out the tokens).
- -Tokenization obviates this potential recovery measure, as an attacker could tokenize and immediately transfer staked tokens to another wallet. So, as an additional protective measure, the staking module permits accounts to selectively disable the tokenization of their stake with the `DisableTokenizeShares` message. - -The `DisableTokenizeShares` message is exposed by the staking module and can be executed as follows: - -```sh -gaiad tx staking disable-tokenize-shares --from mykey -``` - -When tokenization is disabled, a lock is placed on the account, effectively preventing the tokenization of any delegations. Re-enabling tokenization would initiate the removal of the lock, but the process is not immediate. The lock removal is queued, with the lock itself persisting throughout the unbonding period. Following the completion of the unbonding period, the lock would be completely removed, restoring the account's ability to tokenize. For liquid staking protocols that enable the lock, this delay better positions the base layer to coordinate a recovery in the event of an exploit. - -## Risks - -Staking Atoms is not free of risk. First, staked Atoms are locked up, and retrieving them requires a 3-week waiting period called the unbonding period. Additionally, if a validator misbehaves, a portion of their total stake can be slashed (i.e. destroyed). This includes the stake of their delegators. - -There is one main slashing condition: - -- **Double signing:** If someone reports that a validator signed two different blocks with the same chain ID at the same height, this validator will get slashed. - -This is why Atom holders should perform careful due diligence on validators before delegating. It is also important that delegators actively monitor the activity of their validators. If a validator behaves suspiciously or is too often offline, delegators can choose to unbond from them or switch to another validator. **Delegators can also mitigate risk by distributing their stake across multiple validators.** diff --git a/docs/docs/delegators/delegator-guide-cli.md b/docs/docs/delegators/delegator-guide-cli.md deleted file mode 100644 index 550da38f..00000000 --- a/docs/docs/delegators/delegator-guide-cli.md +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: Delegator Guide (CLI) -order: 2 ---- - -This document contains all the necessary information for delegators to interact with the Cosmos Hub through the Command-Line Interface (CLI). - -It also contains instructions on how to manage accounts, restore accounts from the fundraiser and use a ledger nano device. - -:::warning -**Very Important**: Please ensure that you follow the steps described hereinafter -carefully, as negligence in this significant process could lead to an indefinite -loss of your Atoms. Therefore, read through the following instructions in their -entirety prior to proceeding and reach out to us in case you need support. - -Please also note that you are about to interact with the Cosmos Hub, a -blockchain technology containing highly experimental software. While the -blockchain has been developed in accordance with the state of the art and audited -with utmost care, we can nevertheless expect to have issues, updates and bugs. -Furthermore, interaction with blockchain technology requires -advanced technical skills and always entails risks that are outside our control.
-By using the software, you confirm that you understand the inherent risks -associated with cryptographic software (see also the risk section of the -[Interchain Cosmos Contribution terms](https://github.com/cosmos/cosmos/blob/master/fundraiser/Interchain%20Cosmos%20Contribution%20Terms%20-%20FINAL.pdf)) and that the Interchain Foundation and/or -the Tendermint Team may not be held liable for potential damages arising out of the use of the -software. Any use of this open source software released under the Apache 2.0 license is -done at your own risk and on an "AS IS" basis, without warranties or conditions -of any kind. -::: - -Please exercise extreme caution! - -## Table of Contents - -- [Installing `gaiad`](#installing-gaiad) -- [Cosmos Accounts](#cosmos-accounts) - - [Restoring an Account from the Fundraiser](#restoring-an-account-from-the-fundraiser) - - [Creating an Account](#creating-an-account) -- [Accessing the Cosmos Hub Network](#accessing-the-cosmos-hub-network) - - [Running Your Own Full-Node](#running-your-own-full-node) - - [Connecting to a Remote Full-Node](#connecting-to-a-remote-full-node) -- [Setting Up `gaiad`](#setting-up-gaiad) -- [Querying the State](#querying-the-state) -- [Sending Transactions](#sending-transactions) - - [A Note on Gas and Fees](#a-note-on-gas-and-fees) - - [Bonding Atoms and Withdrawing Rewards](#bonding-atoms-and-withdrawing-rewards) - - [Participating in Governance](#participating-in-governance) - - [Signing Transactions from an Offline Computer](#signing-transactions-from-an-offline-computer) - -## Installing `gaiad` - -`gaiad`: This is the command-line interface to interact with a `gaiad` full-node. - -:::warning -**Please check that you download the latest stable release of `gaiad` that is available** -::: - -[**Download the binaries**] -Not available yet. - -[**Install from source**](../getting-started/installation.md) - -:::tip -`gaiad` is used from a terminal. To open the terminal, follow these steps: - -- **Windows**: `Start` > `All Programs` > `Accessories` > `Command Prompt` -- **MacOS**: `Finder` > `Applications` > `Utilities` > `Terminal` -- **Linux**: `Ctrl` + `Alt` + `T` -::: - -## Cosmos Accounts - -At the core of every Cosmos account, there is a seed, which takes the form of a 12- or 24-word mnemonic. From this mnemonic, it is possible to create any number of Cosmos accounts, i.e. private key/public key pairs. This is called an HD wallet (see [BIP32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki) for more information on the HD wallet specification). - -```txt - Account 0 Account 1 Account 2 - -+------------------+ +------------------+ +------------------+ -| | | | | | -| Address 0 | | Address 1 | | Address 2 | -| ^ | | ^ | | ^ | -| | | | | | | | | -| | | | | | | | | -| | | | | | | | | -| + | | + | | + | -| Public key 0 | | Public key 1 | | Public key 2 | -| ^ | | ^ | | ^ | -| | | | | | | | | -| | | | | | | | | -| | | | | | | | | -| + | | + | | + | -| Private key 0 | | Private key 1 | | Private key 2 | -| ^ | | ^ | | ^ | -+------------------+ +------------------+ +------------------+ - | | | - | | | - | | | - +--------------------------------------------------------------------+ - | - | - +---------+---------+ - | | - | Mnemonic (Seed) | - | | - +-------------------+ -``` - -The funds stored in an account are controlled by the private key. This private key is generated using a one-way function from the mnemonic. If you lose the private key, you can retrieve it using the mnemonic.
:::warning
**Do not lose or share your 12 words with anyone. To prevent theft or loss of funds, it is best to keep multiple copies of your mnemonic and store them in a safe, secure place that only you know how to access. If someone is able to gain access to your mnemonic, they will be able to gain access to your private keys and control the accounts associated with them.**
:::

The address is a public string with a human-readable prefix (e.g. `cosmos10snjt8dmpr5my0h76xj48ty80uzwhraqalu4eg`) that identifies your account. When someone wants to send you funds, they send them to your address. It is computationally infeasible to find the private key associated with a given address.

### Restoring an Account from the Fundraiser

:::tip
*NOTE: This section only concerns fundraiser participants*
:::

If you participated in the fundraiser, you should be in possession of a 12-word mnemonic. Newly generated mnemonics use 24 words, but 12-word mnemonics are also compatible with all the Cosmos tools.

#### On a Ledger Device

At the core of a Ledger device, there is a mnemonic used to generate accounts on multiple blockchains (including the Cosmos Hub). Usually, you will create a new mnemonic when you initialize your Ledger device. However, it is possible to tell the Ledger device to use a mnemonic provided by the user instead. Let us go ahead and see how you can input the mnemonic you obtained during the fundraiser as the seed of your Ledger device.

:::warning
*NOTE: To do this, **it is preferable to use a brand new Ledger device.** Indeed, there can be only one mnemonic per Ledger device. If, however, you want to use a Ledger that is already initialized with a seed, you can reset it by going to `Settings` > `Device` > `Reset All`. **Please note that this will wipe out the seed currently stored on the device. If you have not properly secured the associated mnemonic, you could lose your funds!***
:::

The following steps need to be performed on an uninitialized Ledger device:

1. Connect your Ledger device to the computer via USB
2. Press both buttons
3. Do **NOT** choose the "Config as a new device" option. Instead, choose "Restore Configuration"
4. Choose a PIN
5. Choose the 12 words option
6. Input each of the words you got during the fundraiser, in the correct order.

Your Ledger is now correctly set up with your fundraiser mnemonic! Do not lose this mnemonic! If your Ledger is compromised, you can always restore a new device using the same mnemonic.

Next, click [here](#using-a-ledger-device) to learn how to generate an account.

#### On a Computer

:::warning
**NOTE: It is more secure to perform this action on an offline computer**
:::

To restore an account using a fundraiser mnemonic and store the associated encrypted private key on a computer, use the following command:

```bash
gaiad keys add <yourKeyName> --recover
```

- `<yourKeyName>` is the name of the account. It is a reference to the account number used to derive the key pair from the mnemonic. You will use this name to identify your account when you want to send a transaction.
- You can add the optional `--account` flag to specify the path (`0`, `1`, `2`, ...) you want to use to generate your account. By default, account `0` is generated.
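As a quick sanity check after recovery (a sketch, assuming the hypothetical key name `fundraiser` was used above), you can confirm the key was stored and inspect the address it derives:

```bash
# List all keys known to the local credentials store.
gaiad keys list

# Show the address derived for the restored account
# ("fundraiser" is a hypothetical key name).
gaiad keys show fundraiser --address
```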
The private key of account `0` will be saved in your operating system's credentials storage. Each time you want to send a transaction, you will need to unlock your system's credentials store. If you lose access to your credentials storage, you can always recover the private key with the mnemonic.

:::tip
**You may not be prompted for a password each time you send a transaction, since most operating systems unlock the user's credentials store upon login by default. If you want to change your credentials store security policies, please refer to your operating system manual.**
:::

### Creating an Account

To create an account, you just need to have `gaiad` installed. Before creating it, you need to know where you intend to store and interact with your private keys. The best options are to store them on an offline dedicated computer or a Ledger device. Storing them on your regular online computer involves more risk, since anyone who infiltrates your computer through the internet could exfiltrate your private keys and steal your funds.

#### Using a Ledger Device

:::warning
**Only use Ledger devices that you bought factory new or trust fully**
:::

When you initialize your Ledger, a 24-word mnemonic is generated and stored in the device. This mnemonic is compatible with Cosmos, and Cosmos accounts can be derived from it. Therefore, all you have to do is make your Ledger compatible with `gaiad`. To do so, you need to go through the following steps:

1. Download the Ledger Live app [here](https://www.ledger.com/ledger-live).
2. Connect your Ledger via USB and update to the latest firmware
3. Go to the Ledger Live app store, and download the "Cosmos" application (this can take a while). **Note: You may have to enable `Dev Mode` in the `Settings` of Ledger Live to be able to download the "Cosmos" application**.
4. Navigate to the Cosmos app on your Ledger device

Then, to create an account, use the following command:

```bash
gaiad keys add <yourKeyName> --ledger
```

:::warning
**This command will only work while the Ledger is plugged in and unlocked**
:::

- `<yourKeyName>` is the name of the account. It is a reference to the account number used to derive the key pair from the mnemonic. You will use this name to identify your account when you want to send a transaction.
- You can add the optional `--account` flag to specify the path (`0`, `1`, `2`, ...) you want to use to generate your account. By default, account `0` is generated.

#### Using a Computer

:::warning
**NOTE: It is more secure to perform this action on an offline computer**
:::

To generate an account, just use the following command:

```bash
gaiad keys add <yourKeyName>
```

The command will generate a 24-word mnemonic and save the private and public keys for account `0` at the same time. Each time you want to send a transaction, you will need to unlock your system's credentials store. If you lose access to your credentials storage, you can always recover the private key with the mnemonic.

:::tip
**You may not be prompted for a password each time you send a transaction, since most operating systems unlock the user's credentials store upon login by default. If you want to change your credentials store security policies, please refer to your operating system manual.**
:::
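If you prefer not to rely on the operating system's credentials store, `gaiad` can keep keys in an encrypted file instead. A minimal sketch, assuming the standard Cosmos SDK `--keyring-backend` flag and a hypothetical key name:

```bash
# Store the new key in an encrypted file under the gaiad home directory
# instead of the OS credentials store; you will be asked to set a passphrase.
gaiad keys add mykey --keyring-backend file
```

The `file` backend trades the convenience of login-time unlocking for an explicit passphrase prompt on every signing operation.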
:::warning
**Do not lose or share your 12 words with anyone. To prevent theft or loss of funds, it is best to keep multiple copies of your mnemonic and store them in a safe, secure place that only you know how to access. If someone is able to gain access to your mnemonic, they will be able to gain access to your private keys and control the accounts associated with them.**
:::

:::warning
After you have secured your mnemonic (triple check!), you can delete your bash history to ensure no one can retrieve it:

```bash
history -c
rm ~/.bash_history
```

:::

- `<yourKeyName>` is the name of the account. It is a reference to the account number used to derive the key pair from the mnemonic. You will use this name to identify your account when you want to send a transaction.
- You can add the optional `--account` flag to specify the path (`0`, `1`, `2`, ...) you want to use to generate your account. By default, account `0` is generated.

You can generate more accounts from the same mnemonic using the following command:

```bash
gaiad keys add <yourKeyName> --recover --account 1
```

This command will prompt you to input a passphrase as well as your mnemonic. Change the account number to generate a different account.

## Accessing the Cosmos Hub Network

In order to query the state and send transactions, you need a way to access the network. To do so, you can either run your own full-node, or connect to someone else's.

:::warning
**NOTE: Do not share your mnemonic (12 or 24 words) with anyone. The only person who should ever need to know it is you. This is especially important if you are ever approached via email or direct message by someone requesting that you share your mnemonic for any kind of blockchain services or support. No one from Cosmos, the Tendermint team or the Interchain Foundation will ever send an email that asks you to share any kind of account credentials or your mnemonic.**
:::

### Running Your Own Full-Node

This is the most secure option, but it comes with relatively high resource requirements. In order to run your own full-node, you need good bandwidth and at least 1TB of disk space.

You will find the tutorial on how to install `gaiad` [here](../getting-started/installation.md), and the guide to run a full-node [here](../hub-tutorials/join-mainnet.md).

### Connecting to a Remote Full-Node

If you do not want to or cannot run your own node, you can connect to someone else's full-node. You should pick an operator you trust, because a malicious operator could return incorrect query results or censor your transactions. However, they will never be able to steal your funds, as your private keys are stored locally on your computer or Ledger device. Possible full-node operators include validators, wallet providers, and exchanges.

In order to connect to the full-node, you will need an address of the following form: `https://77.87.106.33:26657` (*Note: This is a placeholder*). This address has to be communicated by the full-node operator you choose to trust. You will use this address in the [following section](#setting-up-gaiad).

## Setting Up `gaiad`

:::tip
**Before setting up `gaiad`, make sure you have set up a way to [access the Cosmos Hub network](#accessing-the-cosmos-hub-network)**
:::

:::warning
**Please check that you are always using the latest stable release of `gaiad`**
:::

`gaiad` is the tool that enables you to interact with the node that runs on the Cosmos Hub network, whether you run it yourself or not. Let us set it up properly.

In order to set up `gaiad`, use the following command:

```bash
gaiad config <flag> <value>
```

It allows you to set a default value for each given flag.
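For instance (a sketch; `cosmoshub-4` is assumed here to be the current mainnet chain ID), setting a default chain ID spares you from passing `--chain-id` on every command:

```bash
# Set a default chain ID for all subsequent gaiad commands
# (cosmoshub-4 is an assumed example value).
gaiad config chain-id cosmoshub-4
```

The same pattern applies to the node address, which is set next.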
First, set up the address of the full-node you want to connect to:

```bash
gaiad config node <host>:<port>
```

## Querying the State

```bash
// query the list of validators
gaiad query staking validators

// query the information of a validator given their address (e.g. cosmosvaloper1n5pepvmgsfd3p2tqqgvt505jvymmstf6s9gw27)
gaiad query staking validator <validatorAddress>

// query all delegations made from a delegator given their address (e.g. cosmos10snjt8dmpr5my0h76xj48ty80uzwhraqalu4eg)
gaiad query staking delegations <delegatorAddress>

// query a specific delegation made from a delegator (e.g. cosmos10snjt8dmpr5my0h76xj48ty80uzwhraqalu4eg) to a validator (e.g. cosmosvaloper1n5pepvmgsfd3p2tqqgvt505jvymmstf6s9gw27) given their addresses
gaiad query staking delegation <delegatorAddress> <validatorAddress>

// query the rewards of a delegator given a delegator address (e.g. cosmos10snjt8dmpr5my0h76xj48ty80uzwhraqalu4eg)
gaiad query distribution rewards <delegatorAddress>

// query all proposals currently open for depositing
gaiad query gov proposals --status deposit_period

// query all proposals currently open for voting
gaiad query gov proposals --status voting_period

// query a proposal given its proposalID
gaiad query gov proposal <proposalID>
```

For more commands, just type:

```bash
gaiad query
```

For each command, you can use the `-h` or `--help` flag to get more information.

## Sending Transactions

:::warning
On Cosmos Hub mainnet, the accepted denom is `uatom`, where `1atom = 1,000,000uatom`
:::

### A Note on Gas and Fees

Transactions on the Cosmos Hub network need to include a transaction fee in order to be processed. This fee pays for the gas required to run the transaction. The formula is the following:

```js
fees = ceil(gas * gasPrices)
```

The `gas` is dependent on the transaction. Different transactions require different amounts of `gas`. The `gas` amount for a transaction is calculated as it is being processed, but there is a way to estimate it beforehand by using the `auto` value for the `gas` flag. Of course, this only gives an estimate. You can adjust this estimate with the flag `--gas-adjustment` (default `1.0`) if you want to be sure you provide enough `gas` for the transaction. For the remainder of this tutorial, we will use a `--gas-adjustment` of `1.5`.

The `gasPrice` is the price of each unit of `gas`. Each validator sets a `min-gas-price` value, and will only include transactions that have a `gasPrice` greater than their `min-gas-price`.

The transaction `fees` are the product of `gas` and `gasPrice`. As a user, you have to input two of these three values. The higher the `gasPrice`/`fees`, the higher the chance that your transaction will get included in a block.

:::tip
For mainnet, the recommended `gas-prices` is `0.0025uatom`.
:::
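To make the formula concrete, here is a worked example with a hypothetical gas estimate at the recommended mainnet price:

```bash
gas=200000        # hypothetical estimate, e.g. as reported by --gas auto
# The price 0.0025uatom is expressed as 25/10000 to stay in integer math;
# fees = ceil(gas * gasPrices) = ceil(200000 * 0.0025) = 500uatom
echo "fee: $(( (gas * 25 + 9999) / 10000 ))uatom"
```

In practice you rarely compute this by hand: passing `--gas auto --gas-adjustment 1.5 --gas-prices 0.0025uatom` lets `gaiad` derive the fee for you.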
### Sending Tokens

:::tip
**Before you can send tokens, you need to [set up `gaiad`](#setting-up-gaiad) and [create an account](#creating-an-account)**
:::

:::warning
**Note: These commands need to be run on an online computer. It is more secure to perform these commands using a Ledger device. For the offline procedure, click [here](#signing-transactions-from-an-offline-computer).**
:::

```bash
// Send a certain amount of tokens to an address
// Ex value for parameters (do not actually use these values in your tx!!): <to_address>=cosmos16m93fezfiezhvnjajzrfyszml8qm92a0w67ntjhd3d0, <amount>=1000000uatom
// Ex value for flags: <gasPrice>=0.0025uatom

gaiad tx bank send [from_key_or_address] [to_address] [amount] [flags]
```

### Bonding Atoms and Withdrawing Rewards

:::tip
**Before you can bond atoms and withdraw rewards, you need to [set up `gaiad`](#setting-up-gaiad) and [create an account](#creating-an-account)**
:::

:::warning
**Before bonding Atoms, please read the [delegator faq](https://cosmos.network/resources/delegators) to understand the risk and responsibilities involved with delegating**
:::

:::warning
**Note: These commands need to be run on an online computer. It is more secure to perform these commands using a Ledger device. For the offline procedure, click [here](#signing-transactions-from-an-offline-computer).**
:::

```bash
// Bond a certain amount of Atoms to a given validator
// ex value for flags: <validatorAddress>=cosmosvaloper18thamkhnj9wz8pa4nhnp9rldprgant57pk2m8s, <amountToBond>=10000000uatom, <gasPrice>=0.0025uatom

gaiad tx staking delegate <validatorAddress> <amountToBond> --from <delegatorKeyName> --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice>


// Redelegate a certain amount of Atoms from a validator to another
// Can only be used if already bonded to a validator
// Redelegation takes effect immediately, there is no waiting period to redelegate
// After a redelegation, no other redelegation can be made from the account for the next 3 weeks
// ex value for flags: <srcValidatorAddress>=cosmosvaloper18thamkhnj9wz8pa4nhnp9rldprgant57pk2m8s, <amountToRedelegate>=100000000uatom, <gasPrice>=0.0025uatom

gaiad tx staking redelegate <srcValidatorAddress> <destValidatorAddress> <amountToRedelegate> --from <delegatorKeyName> --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice>

// Withdraw all rewards
// ex value for flag: <gasPrice>=0.0025uatom

gaiad tx distribution withdraw-all-rewards --from <delegatorKeyName> --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice>


// Unbond a certain amount of Atoms from a given validator
// You will have to wait 3 weeks before your Atoms are fully unbonded and transferable
// ex value for flags: <validatorAddress>=cosmosvaloper18thamkhnj9wz8pa4nhnp9rldprgant57pk2m8s, <amountToUnbond>=10000000uatom, <gasPrice>=0.0025uatom

gaiad tx staking unbond <validatorAddress> <amountToUnbond> --from <delegatorKeyName> --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice>
```

:::warning
**If you use a connected Ledger, you will be asked to confirm the transaction on the device before it is signed and broadcast to the network. Note that the command will only work while the Ledger is plugged in and unlocked.**
:::

To confirm that your transaction went through, you can use the following queries:

```bash
// your balance should change after you bond Atoms or withdraw rewards
gaiad query account <yourAddress>

// you should have delegations after you bond Atoms
gaiad query staking delegations <delegatorAddress>

// this returns your tx if it has been included
// use the tx hash that was displayed when you created the tx
gaiad query tx <txHash>
```

Double check with a block explorer if you interact with the network through a trusted full-node.
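You can also track unbonding and redelegation cooldowns directly. A sketch, assuming the standard staking query commands and reusing the example delegator address from above:

```bash
// watch the 3-week unbonding entries complete for a delegator
// (e.g. cosmos10snjt8dmpr5my0h76xj48ty80uzwhraqalu4eg)
gaiad query staking unbonding-delegations <delegatorAddress>

// list redelegations still inside their 3-week cooldown
gaiad query staking redelegations <delegatorAddress>
```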
## Participating in Governance

### Primer on Governance

The Cosmos Hub has a built-in governance system that lets bonded Atom holders vote on proposals. There are three types of proposals:

- `Text Proposals`: These are the most basic type of proposals. They can be used to get the opinion of the network on a given topic.
- `Parameter Proposals`: These are used to update the value of an existing parameter.
- `Software Upgrade Proposals`: These are used to propose an upgrade of the Hub's software.

Any Atom holder can submit a proposal. In order for the proposal to be open for voting, it needs to come with a `deposit` that is greater than a parameter called `minDeposit`. The `deposit` need not be provided in its entirety by the submitter. If the initial proposer's `deposit` is not sufficient, the proposal enters the `deposit_period` status. Then, any Atom holder can increase the deposit by sending a `depositTx`.

Once the `deposit` reaches `minDeposit`, the proposal enters the `voting_period`, which lasts 2 weeks. Any **bonded** Atom holder can then cast a vote on this proposal. The options are `Yes`, `No`, `NoWithVeto` and `Abstain`. The weight of the vote is based on the amount of bonded Atoms of the sender. If they don't vote, delegators inherit the vote of their validator. However, delegators can override their validator's vote by sending a vote themselves.

At the end of the voting period, the proposal is accepted if there are more than 50% `Yes` votes (excluding `Abstain` votes) and less than 33.33% of `NoWithVeto` votes (excluding `Abstain` votes).

### In Practice

:::tip
**Before you can vote on proposals, you need to [bond Atoms](#bonding-atoms-and-withdrawing-rewards)**
:::

:::warning
**Note: These commands need to be run on an online computer. It is more secure to perform these commands using a Ledger device. For the offline procedure, click [here](#signing-transactions-from-an-offline-computer).**
:::

```bash
// Submit a Proposal
// <type>=text/parameter_change/software_upgrade
// ex value for flag: <gasPrice>=0.0025uatom

// the proposal must meet the minimum deposit amount - please check the current chain params
gaiad tx gov submit-legacy-proposal --title "Test Text Proposal" --description "My awesome proposal" --type "text" --deposit=10000000uatom --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice> --from <delegatorKeyName>

// Increase deposit of a proposal
// Retrieve proposalID from $gaiad query gov proposals --status deposit_period
// ex value for parameter: <deposit>=10000000uatom

gaiad tx gov deposit <proposalID> <deposit> --gas auto --gas-adjustment 1.5 --gas-prices <gasPrice> --from <delegatorKeyName>

// Vote on a proposal
// Retrieve proposalID from $gaiad query gov proposals --status voting_period
//