diff --git a/.gitignore b/.gitignore
index bdba911a4..a31254dbb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -513,3 +513,4 @@ secring.*
 junit*.xml
 node_modules/
 coverage/
+deleteme.yaml
diff --git a/README.md b/README.md
index fd90a5a64..6805dd7eb 100644
--- a/README.md
+++ b/README.md
@@ -199,13 +199,21 @@ Kubernetes Context : kind-solo
 Kubernetes Cluster : kind-solo
 Kubernetes Namespace : solo
 **********************************************************************************
-✔ Initialize
-✔ Install chart 'fullstack-deployment' [1s]
-✔ Waiting for network pods to be ready [2m38s]
-  ✔ Node: node0 (Pod: network-node0-0) [2m38s]
-  ✔ Node: node1 (Pod: network-node1-0)
-  ✔ Node: node2 (Pod: network-node2-0)
-
+✔ Initialize [5s]
+✔ Install chart 'fullstack-deployment' [19s]
+✔ Check node pods are ready [2s]
+  ✔ Check Node: node0 [0.8s]
+  ✔ Check Node: node1 [1s]
+  ✔ Check Node: node2 [0.9s]
+✔ Check proxy pods are ready [0.7s]
+  ✔ Check HAProxy for: node0 [0.7s]
+  ✔ Check HAProxy for: node1 [0.7s]
+  ✔ Check HAProxy for: node2 [0.7s]
+  ✔ Check Envoy Proxy for: node0 [0.7s]
+  ✔ Check Envoy Proxy for: node1 [0.7s]
+  ✔ Check Envoy Proxy for: node2 [0.7s]
+✔ Check auxiliary pods are ready [5s]
+  ✔ Check MinIO [5s]
 ```

 * Setup node with Hedera platform software.
@@ -233,9 +241,9 @@ Kubernetes Namespace : solo
 ✔ Copy gRPC TLS keys to staging
 ✔ Prepare config.txt for the network
 ✔ Fetch platform software into network nodes [1m7s]
-  ✔ Node: node0 [48s]
-  ✔ Node: node1 [44s]
-  ✔ Node: node2 [1m7s]
+  ✔ Update node: node0 [48s]
+  ✔ Update node: node1 [44s]
+  ✔ Update node: node2 [1m7s]
 ✔ Setup network nodes [1s]
   ✔ Node: node0 [1s]
     ✔ Copy Gossip keys [0.2s]
@@ -296,17 +304,35 @@ Kubernetes Context : kind-solo-e2e
 Kubernetes Cluster : kind-solo-e2e
 Kubernetes Namespace : solo
 **********************************************************************************
-✔ Initialize
-✔ Enable mirror-node [4s]
-  ✔ Prepare address book [0.1s]
-  ✔ Deploy mirror-node [3s]
-✔ Check Mirror node components are ACTIVE [3s]
-  ✔ Check Postgres DB [3s]
-  ✔ Check Importer
-  ✔ Check REST API
-  ✔ Check Web3
-  ✔ Check GRPC
-  ✔ Check Hedera Explorer
+✔ Initialize [7s]
+✔ Enable mirror-node [1m10s]
+  ✔ Prepare address book [1s]
+  ✔ Deploy mirror-node [1m9s]
+✔ Check pods are ready [59s]
+  ✔ Check Postgres DB [12s]
+  ✔ Check REST API [39s]
+  ✔ Check GRPC [30s]
+  ✔ Check Monitor [59s]
+  ✔ Check Importer [48s]
+  ✔ Check Hedera Explorer [0.7s]
+```
+
+* Deploy a JSON RPC relay
+
+```
+$ solo relay deploy
+******************************* Solo *********************************************
+Version : 0.22.1
+Kubernetes Context : microk8s
+Kubernetes Cluster : microk8s-cluster
+Kubernetes Namespace : solo
+**********************************************************************************
+(node:7924) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
+(Use `node --trace-deprecation ...` to show where the warning was created) +✔ Initialize [1s] +✔ Prepare chart values +✔ Deploy JSON RPC Relay [9s] +✔ Check relay is ready [21s] ``` You may view the list of pods using `k9s` as below: diff --git a/package-lock.json b/package-lock.json index b51f799ad..7251d80f8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -26,7 +26,9 @@ "figlet": "^1.7.0", "got": "^14.2.0", "inquirer": "^9.2.15", + "ip": "^2.0.1", "js-base64": "^3.7.7", + "js-yaml": "^4.1.0", "listr2": "^8.0.2", "semver": "^7.6.0", "stream-buffers": "^3.0.2", @@ -773,12 +775,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/eslintrc/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -794,18 +790,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@eslint/eslintrc/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/@eslint/eslintrc/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -1488,6 +1472,28 @@ "node": ">=8" } }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/@istanbuljs/schema": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", @@ -1927,22 +1933,6 @@ "openid-client": "^5.3.0" } }, - "node_modules/@kubernetes/client-node/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/@kubernetes/client-node/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/@listr2/prompt-adapter-enquirer": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/@listr2/prompt-adapter-enquirer/-/prompt-adapter-enquirer-2.0.2.tgz", @@ -2715,13 +2705,9 @@ } }, 
"node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "dependencies": { - "sprintf-js": "~1.0.2" - } + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/array-buffer-byte-length": { "version": "1.0.0", @@ -4522,12 +4508,6 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, "node_modules/eslint/node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -4587,18 +4567,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, "node_modules/eslint/node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -5721,6 +5689,11 @@ "node": ">= 0.4" } }, + "node_modules/ip": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.1.tgz", + "integrity": "sha512-lJUL9imLTNi1ZfXT+DU6rBBdbiKGBuay9B6xGSPVjUeQwaH1RIGqef8RZkUtHioLmSNpPR5M4HVKJGm1j8FWVQ==" + }, "node_modules/ipaddr.js": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", @@ -7021,13 +6994,11 @@ "dev": true }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" diff --git a/package.json b/package.json index cec98d806..bbf0ed4df 100644 --- a/package.json +++ b/package.json @@ -38,7 +38,9 @@ "figlet": "^1.7.0", "got": "^14.2.0", "inquirer": "^9.2.15", + "ip": "^2.0.1", "js-base64": "^3.7.7", + "js-yaml": "^4.1.0", "listr2": "^8.0.2", "semver": "^7.6.0", "stream-buffers": "^3.0.2", diff --git a/resources/profiles/custom-spec.yaml b/resources/profiles/custom-spec.yaml new file mode 100644 index 000000000..f9e309e49 --- /dev/null +++ b/resources/profiles/custom-spec.yaml @@ -0,0 +1,261 @@ +local: # 3 nodes, ~950 TPS (Docker Desktop 8 cores, 16 GB RAM) + consensus: # use chart defaults + haproxy: # use chart defaults + envoyProxy: # use chart defaults + rpcRelay: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 1000Mi + explorer: + resources: + requests: + cpu: 200m + memory: 100Mi + limits: + cpu: 1000m + 
memory: 250Mi + mirror: + postgresql: + persistence: + size: 10Gi + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + importer: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 2000Mi + rest: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 500Mi + grpc: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 1000Mi + web3: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 500Mi + monitor: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 1000Mi + minio: # use chart defaults + +tiny: # 3 nodes, ~990 TPS + consensus: + root: + resources: + requests: + cpu: 3500m + memory: 4000Mi + limits: + cpu: 3750m + memory: 7750Mi + mirror: + postgresql: + persistence: + size: 100Gi + postgresql: + resources: + requests: + cpu: 250m + memory: 500Mi + limits: + cpu: 1500m + memory: 2000Mi + importer: + resources: + requests: + cpu: 500m + memory: 1000Mi + limits: + cpu: 1500m + memory: 2000Mi + rest: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 500Mi + grpc: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + web3: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + monitor: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + minio: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 10Gi #volume size + resources: + requests: + cpu: 250m + memory: 1000Mi + limits: + cpu: 1000m + memory: 2000Mi + +small: + consensus: + root: + resources: + requests: + cpu: 8000m + memory: 8000Mi + limits: + cpu: 11500m + memory: 12000Mi + mirror: + importer: + resources: + requests: + cpu: 1000m + memory: 2000Mi + limits: + cpu: 2000m + memory: 4000Mi + minio: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 200Gi #volume size + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 0 + memory: 0 + +medium: + consensus: + root: + resources: + requests: + cpu: 12000m + memory: 16000Mi + limits: + cpu: 16000m + memory: 24000Mi + mirror: + importer: + resources: + requests: + cpu: 1000m + memory: 2000Mi + limits: + cpu: 2000m + memory: 4000Mi + minio: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 200Gi #volume size + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 0 + memory: 0 + +large: + consensus: + root: + resources: + requests: + cpu: 18000m + memory: 32000Mi + limits: + cpu: 24000m + memory: 64000Mi + mirror: + postgresql: + persistence: + size: 500Gi + resources: + requests: + cpu: 1500m + memory: 1500Mi + limits: + cpu: 4000m + memory: 4000Mi + importer: + resources: + requests: + cpu: 2000m + memory: 4000Mi + limits: + cpu: 2000m + memory: 8000Mi + minio: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 500Gi #volume size + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 0 + memory: 0 diff --git a/src/commands/base.mjs b/src/commands/base.mjs index ff90fe358..9e997f625 100644 --- a/src/commands/base.mjs +++ b/src/commands/base.mjs @@ -15,6 +15,7 @@ * */ 'use strict' +import paths from 'path' import { MissingArgumentError } from '../core/errors.mjs' import { ShellRunner } from '../core/shell_runner.mjs' @@ -26,12 +27,24 @@ export class BaseCommand extends ShellRunner { if (chartDir) { const chartPath = `${chartDir}/${chartReleaseName}` await 
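Reviewer note: the profile definitions above are plain YAML keyed by profile name (local, tiny, small, medium, large), each holding per-component resource requests/limits. A minimal sketch of reading one profile with the js-yaml dependency this PR adds; the file path, profile name and printed key are only illustrative, not the ProfileManager implementation:

```
import fs from 'fs'
import * as yaml from 'js-yaml'

// Sketch only: load the profile file and pick one profile by name.
function loadProfile (profileFile, profileName) {
  const profiles = yaml.load(fs.readFileSync(profileFile, 'utf8')) || {}
  if (!(profileName in profiles)) throw new Error(`profile not found: ${profileName}`)
  return profiles[profileName] || {}
}

// e.g. the CPU limit configured for the JSON RPC relay in the 'local' profile
const local = loadProfile('resources/profiles/custom-spec.yaml', 'local')
console.log(local.rpcRelay?.resources?.limits?.cpu) // '500m'
```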
this.helm.dependency('update', chartPath) - return chartPath } return `${chartRepo}/${chartReleaseName}` } + prepareValuesFiles (valuesFile) { + let valuesArg = '' + if (valuesFile) { + const valuesFiles = valuesFile.split(',') + valuesFiles.forEach(vf => { + const vfp = paths.resolve(vf) + valuesArg += ` --values ${vfp}` + }) + } + + return valuesArg + } + constructor (opts) { if (!opts || !opts.logger) throw new Error('An instance of core/Logger is required') if (!opts || !opts.helm) throw new Error('An instance of core/Helm is required') diff --git a/src/commands/flags.mjs b/src/commands/flags.mjs index 998613910..d597b6042 100644 --- a/src/commands/flags.mjs +++ b/src/commands/flags.mjs @@ -99,6 +99,24 @@ export const valuesFile = { } } +export const profileFile = { + name: 'profile-file', + definition: { + describe: 'Resource profile definition (e.g. custom-spec.yaml)', + defaultValue: constants.DEFAULT_PROFILE_FILE, + type: 'string' + } +} + +export const profileName = { + name: 'profile', + definition: { + describe: `Resource profile (${constants.ALL_PROFILES.join(' | ')})`, + defaultValue: constants.PROFILE_LOCAL, + type: 'string' + } +} + export const deployPrometheusStack = { name: 'prometheus-stack', definition: { @@ -109,7 +127,7 @@ export const deployPrometheusStack = { } export const enablePrometheusSvcMonitor = { - name: 'enable-prometheus-svc-monitor', + name: 'prometheus-svc-monitor', definition: { describe: 'Enable prometheus service monitor for the network nodes', defaultValue: false, @@ -161,8 +179,9 @@ export const deployJsonRpcRelay = { export const releaseTag = { name: 'release-tag', definition: { - describe: 'Release tag to be used (e.g. v0.42.5)', + describe: `Release tag to be used (e.g. ${version.HEDERA_PLATFORM_VERSION})`, alias: 't', + defaultValue: version.HEDERA_PLATFORM_VERSION, type: 'string' } } @@ -276,7 +295,7 @@ export const keyFormat = { name: 'key-format', definition: { describe: 'Public and Private key file format (pem or pfx)', - defaultValue: 'pfx', + defaultValue: 'pem', type: 'string' } } @@ -466,7 +485,9 @@ export const allFlags = [ privateKey, accountId, amount, - applicationEnv + applicationEnv, + profileFile, + profileName ] export const allFlagsMap = new Map(allFlags.map(f => [f.name, f])) diff --git a/src/commands/init.mjs b/src/commands/init.mjs index aa139fc16..40a525acc 100644 --- a/src/commands/init.mjs +++ b/src/commands/init.mjs @@ -35,7 +35,8 @@ export class InitCommand extends BaseCommand { setupHomeDirectory (dirs = [ constants.SOLO_HOME_DIR, constants.SOLO_LOGS_DIR, - constants.SOLO_CACHE_DIR + constants.SOLO_CACHE_DIR, + constants.SOLO_VALUES_DIR ]) { const self = this @@ -48,7 +49,7 @@ export class InitCommand extends BaseCommand { }) } catch (e) { this.logger.error(e) - throw new FullstackTestingError(e.message, e) + throw new FullstackTestingError(`failed to create directory: ${e.message}`, e) } return dirs @@ -60,11 +61,16 @@ export class InitCommand extends BaseCommand { */ async init (argv) { const self = this + let cacheDir = this.configManager.getFlag(flags.cacheDir) + if (!cacheDir) { + cacheDir = constants.SOLO_CACHE_DIR + } const tasks = new Listr([ { title: 'Setup home directory and cache', task: async (ctx, _) => { + self.configManager.update(argv) ctx.dirs = this.setupHomeDirectory() } }, @@ -93,29 +99,19 @@ export class InitCommand extends BaseCommand { } }, { - title: 'Copy configuration file templates', + title: `Copy templates in '${cacheDir}'`, task: (ctx, _) => { - let cacheDir = 
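Reviewer note: `prepareValuesFiles`, now shared via `BaseCommand`, expands a comma-separated list of values files into repeated `--values` flags with absolute paths. A standalone sketch of the same behaviour (file names are hypothetical):

```
import paths from 'path'

// 'mirror.yaml,relay.yaml' -> ' --values /abs/mirror.yaml --values /abs/relay.yaml'
function prepareValuesFiles (valuesFile) {
  let valuesArg = ''
  if (valuesFile) {
    for (const vf of valuesFile.split(',')) {
      valuesArg += ` --values ${paths.resolve(vf)}`
    }
  }
  return valuesArg
}

console.log(prepareValuesFiles('mirror.yaml,relay.yaml'))
```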
this.configManager.getFlag(flags.cacheDir) - if (!cacheDir) { - cacheDir = constants.SOLO_CACHE_DIR - } - - const templatesDir = `${cacheDir}/templates` - if (!fs.existsSync(templatesDir)) { - fs.mkdirSync(templatesDir) - } + const resources = ['templates', 'profiles'] + for (const dirName of resources) { + const srcDir = path.resolve(path.join(constants.RESOURCES_DIR, dirName)) + if (!fs.existsSync(srcDir)) continue - const configFiles = [ - `${constants.RESOURCES_DIR}/templates/application.properties`, - `${constants.RESOURCES_DIR}/templates/api-permission.properties`, - `${constants.RESOURCES_DIR}/templates/bootstrap.properties`, - `${constants.RESOURCES_DIR}/templates/settings.txt`, - `${constants.RESOURCES_DIR}/templates/log4j2.xml` - ] + const destDir = path.resolve(path.join(cacheDir, dirName)) + if (!fs.existsSync(destDir)) { + fs.mkdirSync(destDir) + } - for (const filePath of configFiles) { - const fileName = path.basename(filePath) - fs.cpSync(`${filePath}`, `${templatesDir}/${fileName}`, { recursive: true }) + fs.cpSync(srcDir, destDir, { recursive: true }) } if (argv.dev) { @@ -160,7 +156,9 @@ export class InitCommand extends BaseCommand { flags.cacheDir, flags.chartDirectory, flags.keyFormat, - flags.fstChartVersion + flags.fstChartVersion, + flags.profileName, + flags.profileFile ) }, handler: (argv) => { diff --git a/src/commands/mirror_node.mjs b/src/commands/mirror_node.mjs index 09fd26394..cc6b90254 100644 --- a/src/commands/mirror_node.mjs +++ b/src/commands/mirror_node.mjs @@ -16,7 +16,7 @@ */ import { ListrEnquirerPromptAdapter } from '@listr2/prompt-adapter-enquirer' import { Listr } from 'listr2' -import { FullstackTestingError, IllegalArgumentError } from '../core/errors.mjs' +import { FullstackTestingError, IllegalArgumentError, MissingArgumentError } from '../core/errors.mjs' import { Templates, constants } from '../core/index.mjs' import { BaseCommand } from './base.mjs' import * as flags from './flags.mjs' @@ -26,7 +26,26 @@ export class MirrorNodeCommand extends BaseCommand { constructor (opts) { super(opts) if (!opts || !opts.accountManager) throw new IllegalArgumentError('An instance of core/AccountManager is required', opts.accountManager) + if (!opts || !opts.profileManager) throw new MissingArgumentError('An instance of core/ProfileManager is required', opts.downloader) + this.accountManager = opts.accountManager + this.profileManager = opts.profileManager + } + + async prepareValuesArg (valuesFile, deployHederaExplorer) { + let valuesArg = '' + if (valuesFile) { + valuesArg += this.prepareValuesFiles(valuesFile) + } + + const profileName = this.configManager.getFlag(flags.profileName) + const profileValuesFile = await this.profileManager.prepareValuesForMirrorNodeChart(profileName) + if (profileValuesFile) { + valuesArg += this.prepareValuesFiles(profileValuesFile) + } + + valuesArg += ` --set hedera-mirror-node.enabled=true --set hedera-explorer.enabled=${deployHederaExplorer}` + return valuesArg } async deploy (argv) { @@ -39,7 +58,9 @@ export class MirrorNodeCommand extends BaseCommand { self.configManager.update(argv) await prompts.execute(task, self.configManager, [ flags.namespace, - flags.deployHederaExplorer + flags.deployHederaExplorer, + flags.profileName, + flags.profileFile ]) ctx.config = { @@ -53,7 +74,10 @@ export class MirrorNodeCommand extends BaseCommand { ctx.config.stagingDir = Templates.renderStagingDir(self.configManager, flags) - ctx.config.valuesArg = ` --set hedera-mirror-node.enabled=true --set 
hedera-explorer.enabled=${ctx.config.deployHederaExplorer}` + ctx.config.valuesArg = await self.prepareValuesArg( + ctx.config.valuesFile, + ctx.config.deployHederaExplorer + ) if (!await self.k8.hasNamespace(ctx.config.namespace)) { throw new FullstackTestingError(`namespace ${ctx.config.namespace} does not exist`) @@ -93,56 +117,56 @@ export class MirrorNodeCommand extends BaseCommand { } }, { - title: 'Check Mirror node components are ACTIVE', + title: 'Check pods are ready', task: async (ctx, parentTask) => { const subTasks = [ { title: 'Check Postgres DB', - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ + task: async (ctx, _) => self.k8.waitForPodReady([ 'app.kubernetes.io/component=postgresql', 'app.kubernetes.io/name=postgres' - ], 1, 900) - }, - { - title: 'Check Importer', - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ - 'app.kubernetes.io/component=importer', - 'app.kubernetes.io/name=importer' - ], 1, 900) + ], 1, 900, 2000) }, { title: 'Check REST API', - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ + task: async (ctx, _) => self.k8.waitForPodReady([ 'app.kubernetes.io/component=rest', 'app.kubernetes.io/name=rest' - ], 1, 900) - }, - { - title: 'Check Web3', - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ - 'app.kubernetes.io/component=web3', - 'app.kubernetes.io/name=web3' - ], 1, 900) + ], 1, 900, 200) }, { title: 'Check GRPC', - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ + task: async (ctx, _) => self.k8.waitForPodReady([ 'app.kubernetes.io/component=grpc', 'app.kubernetes.io/name=grpc' - ], 1, 900) + ], 1, 900, 2000) + }, + { + title: 'Check Monitor', + task: async (ctx, _) => self.k8.waitForPodReady([ + 'app.kubernetes.io/component=monitor', + 'app.kubernetes.io/name=monitor' + ], 1, 900, 2000) + }, + { + title: 'Check Importer', + task: async (ctx, _) => self.k8.waitForPodReady([ + 'app.kubernetes.io/component=importer', + 'app.kubernetes.io/name=importer' + ], 1, 900, 2000) }, { title: 'Check Hedera Explorer', skip: (ctx, _) => !ctx.config.deployHederaExplorer, - task: async (ctx, _) => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ + task: async (ctx, _) => self.k8.waitForPodReady([ 'app.kubernetes.io/component=hedera-explorer', 'app.kubernetes.io/name=hedera-explorer' - ], 1, 900) + ], 1, 900, 2000) } ] return parentTask.newListr(subTasks, { - concurrent: false, + concurrent: true, rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION }) } @@ -219,7 +243,7 @@ export class MirrorNodeCommand extends BaseCommand { } }, { - title: 'Delete PVCs for namespace', + title: 'Delete PVCs', task: async (ctx, _) => { const pvcs = await self.k8.listPvcsByNamespace(ctx.config.namespace, [ 'app.kubernetes.io/component=postgresql', @@ -266,7 +290,9 @@ export class MirrorNodeCommand extends BaseCommand { desc: 'Deploy mirror-node and its components', builder: y => flags.setCommandFlags(y, flags.namespace, - flags.deployHederaExplorer + flags.deployHederaExplorer, + flags.profileName, + flags.profileFile ), handler: argv => { mirrorNodeCmd.logger.debug('==== Running \'mirror-node deploy\' ===') @@ -285,7 +311,8 @@ export class MirrorNodeCommand extends BaseCommand { command: 'destroy', desc: 'Destroy mirror-node components and database', builder: y => flags.setCommandFlags(y, - flags.namespace + flags.namespace, + flags.force ), handler: argv => { mirrorNodeCmd.logger.debug('==== Running \'mirror-node destroy\' ===') diff --git 
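Reviewer note: the mirror-node values argument is now assembled from three sources: any user-supplied values files, a values file generated from the selected resource profile, and the fixed `--set` toggles. A rough sketch of that composition; the profile-values path is a stand-in for whatever `ProfileManager.prepareValuesForMirrorNodeChart` returns:

```
import paths from 'path'

// Sketch: user values files + generated profile values file + fixed --set toggles.
const toValuesFlags = files => files ? files.split(',').map(f => ` --values ${paths.resolve(f)}`).join('') : ''

function buildMirrorNodeValuesArg (userValuesFile, profileValuesFile, deployHederaExplorer) {
  return toValuesFlags(userValuesFile) +
    toValuesFlags(profileValuesFile) +
    ` --set hedera-mirror-node.enabled=true --set hedera-explorer.enabled=${deployHederaExplorer}`
}

// e.g. buildMirrorNodeValuesArg('', 'values-files/mirror.yaml', true)
```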
a/src/commands/network.mjs b/src/commands/network.mjs index 319c2eafe..c4d3f1c46 100644 --- a/src/commands/network.mjs +++ b/src/commands/network.mjs @@ -17,15 +17,22 @@ import { ListrEnquirerPromptAdapter } from '@listr2/prompt-adapter-enquirer' import chalk from 'chalk' import { Listr } from 'listr2' -import { FullstackTestingError } from '../core/errors.mjs' +import { FullstackTestingError, MissingArgumentError } from '../core/errors.mjs' import { BaseCommand } from './base.mjs' import * as flags from './flags.mjs' -import * as paths from 'path' -import { constants, Templates } from '../core/index.mjs' +import { constants } from '../core/index.mjs' import * as prompts from './prompts.mjs' import * as helpers from '../core/helpers.mjs' export class NetworkCommand extends BaseCommand { + constructor (opts) { + super(opts) + + if (!opts || !opts.profileManager) throw new MissingArgumentError('An instance of core/ProfileManager is required', opts.downloader) + + this.profileManager = opts.profileManager + } + getTlsValueArguments (tlsClusterIssuerType, enableHederaExplorerTls, namespace, hederaExplorerTlsLoadBalancerIp, hederaExplorerTlsHostName) { let valuesArg = '' @@ -55,30 +62,25 @@ export class NetworkCommand extends BaseCommand { return valuesArg } - prepareValuesFiles (valuesFile) { - let valuesArg = '' - if (valuesFile) { - const valuesFiles = valuesFile.split(',') - valuesFiles.forEach(vf => { - const vfp = paths.resolve(vf) - valuesArg += ` --values ${vfp}` - }) - } - - return valuesArg - } - - prepareValuesArg (config = {}) { + async prepareValuesArg (config = {}) { let valuesArg = '' if (config.chartDir) { valuesArg = `-f ${config.chartDir}/fullstack-deployment/values.yaml` } - valuesArg += this.prepareValuesFiles(config.valuesFile) + if (config.valuesFile) { + valuesArg += this.prepareValuesFiles(config.valuesFile) + } + + const profileName = this.configManager.getFlag(flags.profileName) + const profileValuesFile = await this.profileManager.prepareValuesForFstChart(profileName) + if (profileValuesFile) { + valuesArg += this.prepareValuesFiles(profileValuesFile) + } // do not deploy mirror node until after we have the updated address book - valuesArg += ' --set hedera-mirror-node.enabled=false --set hedera-explorer.enabled=false' - valuesArg += ` --set telemetry.prometheus.svcMonitor.enabled=${config.enablePrometheusSvcMonitor}` + valuesArg += ' --set "hedera-mirror-node.enabled=false" --set "hedera-explorer.enabled=false"' + valuesArg += ` --set "telemetry.prometheus.svcMonitor.enabled=${config.enablePrometheusSvcMonitor}"` if (config.enableHederaExplorerTls) { valuesArg += this.getTlsValueArguments(config.tlsClusterIssuerType, config.enableHederaExplorerTls, config.namespace, @@ -87,18 +89,9 @@ export class NetworkCommand extends BaseCommand { if (config.releaseTag) { const rootImage = helpers.getRootImageRepository(config.releaseTag) - valuesArg += ` --set defaults.root.image.repository=${rootImage}` + valuesArg += ` --set "defaults.root.image.repository=${rootImage}"` } - // prepare name and account IDs for nodes - const realm = constants.HEDERA_NODE_ACCOUNT_ID_START.realm - const shard = constants.HEDERA_NODE_ACCOUNT_ID_START.shard - let accountId = constants.HEDERA_NODE_ACCOUNT_ID_START.num - let i = 0 - config.nodeIds.forEach(nodeId => { - valuesArg += ` --set hedera.nodes[${i}].name=${nodeId},hedera.nodes[${i++}].accountId=${realm}.${shard}.${accountId++}` - }) - this.logger.debug('Prepared helm chart values', { valuesArg }) return valuesArg } @@ -113,7 +106,9 @@ export 
class NetworkCommand extends BaseCommand { flags.tlsClusterIssuerType, flags.enableHederaExplorerTls, flags.hederaExplorerTlsHostName, - flags.enablePrometheusSvcMonitor + flags.enablePrometheusSvcMonitor, + flags.profileFile, + flags.profileName ] this.configManager.update(argv) @@ -124,7 +119,7 @@ export class NetworkCommand extends BaseCommand { const config = { releaseTag: this.configManager.getFlag(flags.releaseTag), namespace: this.configManager.getFlag(flags.namespace), - nodeIds: helpers.parseNodeIDs(this.configManager.getFlag(flags.nodeIDs)), + nodeIds: helpers.parseNodeIds(this.configManager.getFlag(flags.nodeIDs)), chartDir: this.configManager.getFlag(flags.chartDirectory), fstChartVersion: this.configManager.getFlag(flags.fstChartVersion), valuesFile: this.configManager.getFlag(flags.valuesFile), @@ -139,9 +134,12 @@ export class NetworkCommand extends BaseCommand { config.chartPath = await this.prepareChartPath(config.chartDir, constants.FULLSTACK_TESTING_CHART, constants.FULLSTACK_DEPLOYMENT_CHART) - config.valuesArg = this.prepareValuesArg(config) + config.valuesArg = await this.prepareValuesArg(config) - this.logger.debug('Prepared config', { config, cachedConfig: this.configManager.config }) + this.logger.debug('Prepared config', { + config, + cachedConfig: this.configManager.config + }) return config } @@ -176,14 +174,15 @@ export class NetworkCommand extends BaseCommand { } }, { - title: 'Waiting for network pods to be ready', + title: 'Check node pods are ready', task: async (ctx, task) => { const subTasks = [] + + // nodes for (const nodeId of ctx.config.nodeIds) { - const podName = Templates.renderNetworkPodName(nodeId) subTasks.push({ - title: `Node: ${chalk.yellow(nodeId)} (Pod: ${podName})`, + title: `Check Node: ${chalk.yellow(nodeId)}`, task: () => self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ 'fullstack.hedera.com/type=network-node', @@ -192,6 +191,67 @@ export class NetworkCommand extends BaseCommand { }) } + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: false, // no need to run concurrently since if one node is up, the rest should be up by then + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Check proxy pods are ready', + task: + async (ctx, task) => { + const subTasks = [] + + // HAProxy + for (const nodeId of ctx.config.nodeIds) { + subTasks.push({ + title: `Check HAProxy for: ${chalk.yellow(nodeId)}`, + task: () => + self.k8.waitForPodReady([ + 'fullstack.hedera.com/type=envoy-proxy' + ], 1, 60 * 15, 1000) // timeout 15 minutes + }) + } + + // Envoy Proxy + for (const nodeId of ctx.config.nodeIds) { + subTasks.push({ + title: `Check Envoy Proxy for: ${chalk.yellow(nodeId)}`, + task: () => + self.k8.waitForPodReady([ + 'fullstack.hedera.com/type=haproxy' + ], 1, 60 * 15, 1000) // timeout 15 minutes + }) + } + + // set up the sub-tasks + return task.newListr(subTasks, { + concurrent: true, + rendererOptions: { + collapseSubtasks: false + } + }) + } + }, + { + title: 'Check auxiliary pods are ready', + task: + async (ctx, task) => { + const subTasks = [] + + // minio + subTasks.push({ + title: 'Check MinIO', + task: () => + self.k8.waitForPodReady([ + 'v1.min.io/tenant=minio' + ], 1, 60 * 5, 1000) // timeout 5 minutes + }) + // set up the sub-tasks return task.newListr(subTasks, { concurrent: false, // no need to run concurrently since if one node is up, the rest should be up by then @@ -258,23 +318,17 @@ export class NetworkCommand extends BaseCommand { } }, { - title: 'Get PVCs for namespace', - 
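Reviewer note: the deploy flow builds one Listr sub-task per readiness check and runs the proxy checks concurrently. A self-contained sketch of that pattern with listr2; the node names and the empty check are illustrative only:

```
import { Listr } from 'listr2'

// Placeholder probe; in the command this is e.g. k8.waitForPodReady([...labels], 1, 900, 1000).
const checkReady = async (nodeId) => {}

// Sketch: one sub-task per node, run concurrently.
const tasks = new Listr(
  ['node0', 'node1', 'node2'].map(nodeId => ({
    title: `Check HAProxy for: ${nodeId}`,
    task: () => checkReady(nodeId)
  })),
  { concurrent: true, rendererOptions: { collapseSubtasks: false } }
)

await tasks.run()
```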
task: async (ctx, _) => { - if (ctx.config.deletePvcs === true) { - ctx.config.pvcs = await self.k8.listPvcsByNamespace(ctx.config.namespace) - } - } - }, - { - title: 'Delete PVCs for namespace', + title: 'Delete PVCs', task: async (ctx, _) => { + ctx.config.pvcs = await self.k8.listPvcsByNamespace(ctx.config.namespace) + if (ctx.config.pvcs) { for (const pvc of ctx.config.pvcs) { await self.k8.deletePvc(pvc, ctx.config.namespace) } } }, - skip: (ctx, _) => !ctx.config.pvcs + skip: (ctx, _) => !ctx.config.deletePvcs } ], { concurrent: false, @@ -358,10 +412,12 @@ export class NetworkCommand extends BaseCommand { flags.hederaExplorerTlsLoadBalancerIp, flags.hederaExplorerTlsHostName, flags.enablePrometheusSvcMonitor, - flags.fstChartVersion + flags.fstChartVersion, + flags.profileFile, + flags.profileName ), handler: argv => { - networkCmd.logger.debug("==== Running 'network deploy' ===") + networkCmd.logger.debug('==== Running \'network deploy\' ===') networkCmd.logger.debug(argv) networkCmd.deploy(argv).then(r => { @@ -383,7 +439,7 @@ export class NetworkCommand extends BaseCommand { flags.deletePvcs ), handler: argv => { - networkCmd.logger.debug("==== Running 'network destroy' ===") + networkCmd.logger.debug('==== Running \'network destroy\' ===') networkCmd.logger.debug(argv) networkCmd.destroy(argv).then(r => { @@ -412,7 +468,7 @@ export class NetworkCommand extends BaseCommand { flags.enablePrometheusSvcMonitor ), handler: argv => { - networkCmd.logger.debug("==== Running 'chart upgrade' ===") + networkCmd.logger.debug('==== Running \'chart upgrade\' ===') networkCmd.logger.debug(argv) networkCmd.refresh(argv).then(r => { diff --git a/src/commands/node.mjs b/src/commands/node.mjs index 79dd48d94..6e9202760 100644 --- a/src/commands/node.mjs +++ b/src/commands/node.mjs @@ -66,7 +66,7 @@ export class NodeCommand extends BaseCommand { const podName = Templates.renderNetworkPodName(nodeId) try { - await this.k8.waitForPod(constants.POD_STATUS_RUNNING, [ + await this.k8.waitForPodReady([ 'fullstack.hedera.com/type=network-node', `fullstack.hedera.com/node-name=${nodeId}` ], 1) @@ -333,7 +333,7 @@ export class NodeCommand extends BaseCommand { const config = { namespace: self.configManager.getFlag(flags.namespace), - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)), + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)), releaseTag: self.configManager.getFlag(flags.releaseTag), cacheDir: self.configManager.getFlag(flags.cacheDir), force: self.configManager.getFlag(flags.force), @@ -423,7 +423,10 @@ export class NodeCommand extends BaseCommand { } const fileName = path.basename(filePath) - fs.cpSync(`${filePath}`, `${config.stagingDir}/templates/${fileName}`, { recursive: true }) + const destPath = `${config.stagingDir}/templates/${fileName}` + self.logger.debug(`Copying configuration file to staging: ${filePath} -> ${destPath}`) + + fs.cpSync(filePath, destPath, { force: true }) } } }, @@ -495,7 +498,7 @@ export class NodeCommand extends BaseCommand { for (const nodeId of ctx.config.nodeIds) { const podName = ctx.config.podNames[nodeId] subTasks.push({ - title: `Node: ${chalk.yellow(nodeId)}`, + title: `Update node: ${chalk.yellow(nodeId)}`, task: () => self.plaformInstaller.fetchPlatform(podName, config.releaseTag) }) @@ -573,7 +576,7 @@ export class NodeCommand extends BaseCommand { namespace: self.configManager.getFlag(flags.namespace), chartDir: self.configManager.getFlag(flags.chartDirectory), fstChartVersion: 
self.configManager.getFlag(flags.fstChartVersion), - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)), + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)), applicationEnv: self.configManager.getFlag(flags.applicationEnv), cacheDir: self.configManager.getFlag(flags.cacheDir) } @@ -662,7 +665,7 @@ export class NodeCommand extends BaseCommand { // set up the sub-tasks return parentTask.newListr(subTasks, { - concurrent: false, + concurrent: true, rendererOptions: { collapseSubtasks: false } @@ -694,33 +697,51 @@ export class NodeCommand extends BaseCommand { * @param delay the delay between attempts * @returns {Promise} true if the proxy is up */ - async checkNetworkNodeProxyUp (nodeId, localPort, maxAttempts = 10, delay = 5000) { - const podArray = await this.k8.getPodsByLabel([`app=haproxy-${nodeId}`, 'fullstack.hedera.com/type=haproxy']) + async checkNetworkNodeProxyUp (nodeId, localPort, maxAttempts = 15, delay = 2000) { + const podLabels = [`app=haproxy-${nodeId}`, 'fullstack.hedera.com/type=haproxy'] + let podArray = await this.k8.getPodsByLabel(podLabels) let attempts = 0 + let status = null if (podArray.length > 0) { - const podName = podArray[0].metadata.name - this._portForwards.push(await this.k8.portForward(podName, localPort, 5555)) + let podName = podArray[0].metadata.name + let portForwarder = null + try { - await this.k8.testConnection('localhost', localPort) - } catch (e) { - throw new FullstackTestingError(`failed to create port forward for '${nodeId}' proxy on port ${localPort}`, e) - } + while (attempts < maxAttempts) { + if (attempts === 0) { + portForwarder = await this.k8.portForward(podName, localPort, 5555) + await this.k8.testConnection('localhost', localPort) + } else if (attempts % 5 === 0) { + this.logger.debug(`Recycling proxy ${podName} [attempt: ${attempts}/${maxAttempts}]`) + await this.k8.stopPortForward(portForwarder) + await this.k8.recyclePodByLabels(podLabels, 50) + podArray = await this.k8.getPodsByLabel(podLabels) + podName = podArray[0].metadata.name + portForwarder = await this.k8.portForward(podName, localPort, 5555) + await this.k8.testConnection('localhost', localPort) + } - while (attempts < maxAttempts) { - try { - const status = await this.getNodeProxyStatus(`http://localhost:${localPort}/v2/services/haproxy/stats/native?type=backend`) + status = await this.getNodeProxyStatus(`http://localhost:${localPort}/v2/services/haproxy/stats/native?type=backend`) if (status === 'UP') { - this.logger.debug(`Proxy ${podName} is UP. [attempt: ${attempts}/${maxAttempts}]`) - return true + break } - attempts++ this.logger.debug(`Proxy ${podName} is not UP. Checking again in ${delay}ms ... [attempt: ${attempts}/${maxAttempts}]`) + attempts++ await sleep(delay) - } catch (e) { - throw new FullstackTestingError(`failed to create port forward for '${nodeId}' proxy on port ${localPort}`, e) } + } catch (e) { + throw new FullstackTestingError(`failed to check proxy for '${nodeId}' on port ${localPort}: ${e.message}`, e) + } finally { + if (portForwarder !== null) { + this._portForwards.push(portForwarder) + } + } + + if (status === 'UP') { + this.logger.debug(`Proxy ${podName} is UP. 
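Reviewer note: the reworked proxy check is essentially a bounded retry loop that re-creates the port-forward and recycles the proxy pod every few attempts instead of failing on the first error. Reduced to its core, the pattern is sketched below; the probe and recover callbacks are placeholders, not the Solo methods:

```
// Sketch: retry an async probe up to maxAttempts, running a recovery step every 5th attempt.
async function retryWithRecovery (probe, recover, maxAttempts = 15, delayMs = 2000) {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    if (attempt > 0 && attempt % 5 === 0) await recover(attempt)
    if (await probe()) return true
    await new Promise(resolve => setTimeout(resolve, delayMs))
  }
  return false
}
```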
[attempt: ${attempts}/${maxAttempts}]`) + return true } } @@ -742,7 +763,7 @@ export class NodeCommand extends BaseCommand { ctx.config = { namespace: self.configManager.getFlag(flags.namespace), - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)) + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)) } if (!await self.k8.hasNamespace(ctx.config.namespace)) { @@ -806,7 +827,7 @@ export class NodeCommand extends BaseCommand { ]) const config = { - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)), + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)), cacheDir: self.configManager.getFlag(flags.cacheDir), generateGossipKeys: self.configManager.getFlag(flags.generateGossipKeys), generateTlsKeys: self.configManager.getFlag(flags.generateTlsKeys), @@ -994,8 +1015,7 @@ export class NodeCommand extends BaseCommand { signal: AbortSignal.timeout(5000), headers: { Authorization: `Basic ${Buffer.from( - `${constants.NODE_PROXY_USER_ID}:${constants.NODE_PROXY_PASSWORD}`).toString( - 'base64')}` + `${constants.NODE_PROXY_USER_ID}:${constants.NODE_PROXY_PASSWORD}`).toString('base64')}` } }) const response = await res.json() diff --git a/src/commands/prompts.mjs b/src/commands/prompts.mjs index 7a3dc1321..44b1a94a6 100644 --- a/src/commands/prompts.mjs +++ b/src/commands/prompts.mjs @@ -43,6 +43,7 @@ async function prompt (type, task, input, defaultValue, promptMessage, emptyChec throw new FullstackTestingError(`input failed: ${flagName}: ${e.message}`, e) } } + async function promptText (task, input, defaultValue, promptMessage, emptyCheckMessage, flagName) { return await prompt('text', task, input, defaultValue, promptMessage, emptyCheckMessage, flagName) } @@ -117,6 +118,10 @@ export async function promptChainId (task, input) { export async function promptChartDir (task, input) { try { + if (input === 'false') { + return '' + } + if (input && !fs.existsSync(input)) { input = await task.prompt(ListrEnquirerPromptAdapter).run({ type: 'text', @@ -155,6 +160,46 @@ export async function promptValuesFile (task, input) { } } +export async function promptProfileFile (task, input) { + if (input && !fs.existsSync(input)) { + input = await task.prompt(ListrEnquirerPromptAdapter).run({ + type: 'text', + default: flags.valuesFile.definition.defaultValue, + message: 'Enter path to custom resource profile definition file: ' + }) + } + + if (input && !fs.existsSync(input)) { + throw new IllegalArgumentError(`Invalid profile definition file: ${input}}`, input) + } + + return input +} + +export async function promptProfile (task, input, choices = constants.ALL_PROFILES) { + try { + const initial = choices.indexOf(input) + if (initial < 0) { + const input = await task.prompt(ListrEnquirerPromptAdapter).run({ + type: 'select', + initial: choices.indexOf(flags.keyFormat.definition.defaultValue), + message: 'Select profile for fullstack network deployment', + choices: helpers.cloneArray(choices) + }) + + if (!input) { + throw new FullstackTestingError('key-format cannot be empty') + } + + return input + } + + return input + } catch (e) { + throw new FullstackTestingError(`input failed: ${flags.keyFormat.name}: ${e.message}`, e) + } +} + export async function promptDeployPrometheusStack (task, input) { return await promptToggle(task, input, flags.deployPrometheusStack.definition.defaultValue, @@ -393,6 +438,8 @@ export function getPromptMap () { .set(flags.privateKey.name, promptPrivateKey) .set(flags.accountId.name, promptAccountId) 
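Reviewer note: `promptProfile` falls back to an interactive select when the supplied value is not one of the known profiles. A trimmed sketch of that prompt using the same listr2 enquirer adapter the file already imports; the hard-coded choices mirror `constants.ALL_PROFILES` and the function name is hypothetical:

```
import { ListrEnquirerPromptAdapter } from '@listr2/prompt-adapter-enquirer'

// Sketch: inside a Listr task, ask the user to pick a profile when the input is not recognised.
async function askProfile (task, input, choices = ['local', 'tiny', 'small', 'medium', 'large']) {
  if (choices.includes(input)) return input
  return task.prompt(ListrEnquirerPromptAdapter).run({
    type: 'select',
    initial: choices.indexOf('local'),
    message: 'Select profile for fullstack network deployment',
    choices: [...choices]
  })
}
```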
.set(flags.amount.name, promptAmount) + .set(flags.profileFile.name, promptProfileFile) + .set(flags.profileName.name, promptProfile) } // build the prompt registry diff --git a/src/commands/relay.mjs b/src/commands/relay.mjs index 8f611593e..515ec3659 100644 --- a/src/commands/relay.mjs +++ b/src/commands/relay.mjs @@ -15,7 +15,6 @@ * */ import { Listr } from 'listr2' -import * as paths from 'path' import { FullstackTestingError, MissingArgumentError } from '../core/errors.mjs' import * as helpers from '../core/helpers.mjs' import { constants } from '../core/index.mjs' @@ -24,14 +23,24 @@ import * as flags from './flags.mjs' import * as prompts from './prompts.mjs' export class RelayCommand extends BaseCommand { - prepareValuesArg (valuesFile, nodeIDs, chainID, relayRelease, replicaCount, operatorID, operatorKey) { + constructor (opts) { + super(opts) + + if (!opts || !opts.profileManager) throw new MissingArgumentError('An instance of core/ProfileManager is required', opts.downloader) + + this.profileManager = opts.profileManager + } + + async prepareValuesArg (valuesFile, nodeIDs, chainID, relayRelease, replicaCount, operatorID, operatorKey) { let valuesArg = '' if (valuesFile) { - const valuesFiles = valuesFile.split(',') - valuesFiles.forEach(vf => { - const vfp = paths.resolve(vf) - valuesArg += ` --values ${vfp}` - }) + valuesArg += this.prepareValuesFiles(valuesFile) + } + + const profileName = this.configManager.getFlag(flags.profileName) + const profileValuesFile = await this.profileManager.prepareValuesForRpcRelayChart(profileName) + if (profileValuesFile) { + valuesArg += this.prepareValuesFiles(profileValuesFile) } valuesArg += ` --set config.MIRROR_NODE_URL=${constants.FULLSTACK_DEPLOYMENT_CHART}-rest` @@ -61,7 +70,7 @@ export class RelayCommand extends BaseCommand { } nodeIDs.forEach(nodeID => { - const networkKey = `network-${nodeID.trim()}-0-svc:50211` + const networkKey = `network-${nodeID.trim()}-0:50211` valuesArg += ` --set config.HEDERA_NETWORK.${networkKey}=0.0.3` }) @@ -101,7 +110,9 @@ export class RelayCommand extends BaseCommand { flags.relayReleaseTag, flags.replicaCount, flags.operatorId, - flags.operatorKey + flags.operatorKey, + flags.profileName, + flags.profileFile ]) // prompt if inputs are empty and set it in the context @@ -109,7 +120,7 @@ export class RelayCommand extends BaseCommand { chartDir: self.configManager.getFlag(flags.chartDirectory), namespace: self.configManager.getFlag(flags.namespace), valuesFile: self.configManager.getFlag(flags.valuesFile), - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)), + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)), chainId: self.configManager.getFlag(flags.chainId), relayRelease: self.configManager.getFlag(flags.relayReleaseTag), replicaCount: self.configManager.getFlag(flags.replicaCount), @@ -127,7 +138,7 @@ export class RelayCommand extends BaseCommand { title: 'Prepare chart values', task: async (ctx, _) => { ctx.chartPath = await self.prepareChartPath(ctx.config.chartDir, constants.JSON_RPC_RELAY_CHART, constants.JSON_RPC_RELAY_CHART) - ctx.valuesArg = self.prepareValuesArg( + ctx.valuesArg = await self.prepareValuesArg( ctx.config.valuesFile, ctx.config.nodeIds, ctx.config.chainId, @@ -151,14 +162,26 @@ export class RelayCommand extends BaseCommand { await self.k8.waitForPod(constants.POD_STATUS_RUNNING, [ 'app=hedera-json-rpc-relay', `app.kubernetes.io/instance=${releaseName}` - ], 1, 120, 1000) - - this.logger.showList('Deployed Relays', await 
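Reviewer note: each consensus node is exposed to the relay through a `config.HEDERA_NETWORK.<service>:<port>=<accountId>` value, and the service name changes here from `network-<node>-0-svc` to `network-<node>-0`. A small sketch of how those `--set` fragments are generated (node IDs are examples):

```
// Sketch: one --set entry per consensus node for the relay's HEDERA_NETWORK map.
function hederaNetworkFlags (nodeIds) {
  let valuesArg = ''
  for (const nodeId of nodeIds) {
    valuesArg += ` --set config.HEDERA_NETWORK.network-${nodeId.trim()}-0:50211=0.0.3`
  }
  return valuesArg
}

console.log(hederaNetworkFlags(['node0', 'node1']))
```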
self.chartManager.getInstalledCharts(namespace)) + ], 1, 900, 1000) // reset nodeID self.configManager.setFlag(flags.nodeIDs, '') self.configManager.persist() } + }, + { + title: 'Check relay is ready', + task: async (ctx, _) => { + const releaseName = ctx.releaseName + try { + await self.k8.waitForPodReady([ + 'app=hedera-json-rpc-relay', + `app.kubernetes.io/instance=${releaseName}` + ], 1, 100, 2000) + } catch (e) { + throw new FullstackTestingError(`Relay ${releaseName} is not ready: ${e.message}`, e) + } + } } ], { concurrent: false, @@ -195,7 +218,7 @@ export class RelayCommand extends BaseCommand { ctx.config = { chartDir: self.configManager.getFlag(flags.chartDirectory), namespace: self.configManager.getFlag(flags.namespace), - nodeIds: helpers.parseNodeIDs(self.configManager.getFlag(flags.nodeIDs)) + nodeIds: helpers.parseNodeIds(self.configManager.getFlag(flags.nodeIDs)) } ctx.releaseName = this.prepareReleaseName(ctx.config.nodeIds) @@ -253,7 +276,9 @@ export class RelayCommand extends BaseCommand { flags.nodeIDs, flags.relayReleaseTag, flags.operatorId, - flags.operatorKey + flags.operatorKey, + flags.profileName, + flags.profileFile ) }, handler: argv => { diff --git a/src/core/account_manager.mjs b/src/core/account_manager.mjs index dd0a47ae0..983f57850 100644 --- a/src/core/account_manager.mjs +++ b/src/core/account_manager.mjs @@ -16,6 +16,7 @@ */ import * as HashgraphProto from '@hashgraph/proto' import * as Base64 from 'js-base64' +import os from 'os' import * as constants from './constants.mjs' import { AccountCreateTransaction, @@ -32,6 +33,7 @@ import { } from '@hashgraph/sdk' import { FullstackTestingError, MissingArgumentError } from './errors.mjs' import { Templates } from './templates.mjs' +import ip from 'ip' const REASON_FAILED_TO_GET_KEYS = 'failed to get keys for accountId' const REASON_SKIPPED = 'skipped since it does not have a genesis key' @@ -162,6 +164,35 @@ export class AccountManager { } } + shouldUseLocalHostPortForward (serviceObject) { + if (!serviceObject.loadBalancerIp) return true + + const loadBalancerIp = serviceObject.loadBalancerIp + const interfaces = os.networkInterfaces() + let usePortForward = true + const loadBalancerIpFormat = ip.isV6Format(loadBalancerIp) ? 'ipv4' : 'ipv6' + + // check if serviceIP falls into any subnet of the network interfaces + for (const nic of Object.keys(interfaces)) { + const inf = interfaces[nic] + for (const item of inf) { + if (item.family.toLowerCase() === loadBalancerIpFormat && + ip.cidrSubnet(item.cidr).contains(loadBalancerIp)) { + usePortForward = false + break + } + } + } + + if (usePortForward) { + this.logger.debug('Local network and Load balancer are in different network, using local host port forward') + } else { + this.logger.debug('Local network and Load balancer are in the same network, using load balancer IP port forward') + } + + return usePortForward + } + /** * Returns a node client that can be used to make calls against * @param namespace the namespace for which the node client resides @@ -176,7 +207,7 @@ export class AccountManager { let localPort = constants.LOCAL_NODE_START_PORT for (const serviceObject of serviceMap.values()) { - const usePortForward = !(serviceObject.loadBalancerIp) + const usePortForward = this.shouldUseLocalHostPortForward(serviceObject) const host = usePortForward ? '127.0.0.1' : serviceObject.loadBalancerIp const port = serviceObject.grpcPort const targetPort = usePortForward ? 
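Reviewer note: `shouldUseLocalHostPortForward` decides between dialing the load-balancer IP directly and opening a local port-forward by testing whether that IP falls inside any subnet of a local network interface. A stripped-down sketch of that test with the `ip` package added by this PR; the function name and the example address are illustrative:

```
import os from 'os'
import ip from 'ip'

// Sketch: true when no local interface subnet contains the address,
// i.e. the service is only reachable through a port-forward.
function needsPortForward (loadBalancerIp) {
  const family = ip.isV6Format(loadBalancerIp) ? 'ipv6' : 'ipv4'
  for (const nics of Object.values(os.networkInterfaces())) {
    for (const nic of nics) {
      if (nic.cidr && String(nic.family).toLowerCase() === family &&
          ip.cidrSubnet(nic.cidr).contains(loadBalancerIp)) {
        return false
      }
    }
  }
  return true
}

console.log(needsPortForward('192.168.1.10'))
```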
localPort : port diff --git a/src/core/config_manager.mjs b/src/core/config_manager.mjs index 4dce6edb4..edd5663d7 100644 --- a/src/core/config_manager.mjs +++ b/src/core/config_manager.mjs @@ -114,7 +114,7 @@ export class ConfigManager { let val = argv[flag.name] switch (flag.definition.type) { case 'string': - if (flag.name === flags.chartDirectory.name || flag.name === flags.cacheDir.name) { + if (val && (flag.name === flags.chartDirectory.name || flag.name === flags.cacheDir.name)) { this.logger.debug(`Resolving directory path for '${flag.name}': ${val}`) val = paths.resolve(val) } diff --git a/src/core/constants.mjs b/src/core/constants.mjs index df5ffd55e..f1f1ec257 100644 --- a/src/core/constants.mjs +++ b/src/core/constants.mjs @@ -27,12 +27,14 @@ export const USER_SANITIZED = USER.replace(/[\W_]+/g, '-') export const SOLO_HOME_DIR = process.env.SOLO_HOME || `${process.env.HOME}/.solo` export const SOLO_LOGS_DIR = `${SOLO_HOME_DIR}/logs` export const SOLO_CACHE_DIR = `${SOLO_HOME_DIR}/cache` +export const SOLO_VALUES_DIR = `${SOLO_CACHE_DIR}/values-files` export const DEFAULT_NAMESPACE = 'default' export const HELM = 'helm' export const KEYTOOL = 'keytool' export const CWD = process.cwd() export const SOLO_CONFIG_FILE = `${SOLO_HOME_DIR}/solo.config` export const RESOURCES_DIR = normalize(CUR_FILE_DIR + '/../../resources') +export const PROFILES_DIR = normalize(`${RESOURCES_DIR}/profiles`) export const ROOT_CONTAINER = 'root-container' @@ -89,7 +91,14 @@ export const NODE_PROXY_USER_ID = process.env.NODE_PROXY_USER_ID || 'admin' export const NODE_PROXY_PASSWORD = process.env.NODE_PROXY_PASSWORD || 'adminpwd' export const POD_STATUS_RUNNING = 'Running' -export const POD_STATUS_READY = 'Ready' + +export const POD_CONDITION_INITIALIZED = 'Initialized' +export const POD_CONDITION_READY = 'Ready' +export const POD_CONDITION_CONTAINERS_READY = 'ContainersReady' + +export const POD_CONDITION_POD_SCHEDULED = 'PodScheduled' +export const POD_CONDITION_STATUS_TRUE = 'True' +export const POD_CONDITION_STATUS_FALSE = 'False' // Listr related export const LISTR_DEFAULT_RENDERER_TIMER_OPTION = { @@ -128,3 +137,12 @@ export const OS_MAC = 'mac' export const OS_LINUX = 'linux' export const LOCAL_HOST = '127.0.0.1' + +export const PROFILE_LARGE = 'large' +export const PROFILE_MEDIUM = 'medium' +export const PROFILE_SMALL = 'small' +export const PROFILE_TINY = 'tiny' +export const PROFILE_LOCAL = 'local' + +export const ALL_PROFILES = [PROFILE_LOCAL, PROFILE_TINY, PROFILE_SMALL, PROFILE_MEDIUM, PROFILE_LARGE] +export const DEFAULT_PROFILE_FILE = `${SOLO_CACHE_DIR}/profiles/custom-spec.yaml` diff --git a/src/core/helm.mjs b/src/core/helm.mjs index 09b3cd42c..ea6ab13ac 100644 --- a/src/core/helm.mjs +++ b/src/core/helm.mjs @@ -44,7 +44,7 @@ export class Helm extends ShellRunner { * @returns {Promise} console output as an array of strings */ async install (...args) { - return this.run(this.prepareCommand('install', ...args), true) + return this.run(this.prepareCommand('install', ...args)) } /** diff --git a/src/core/helpers.mjs b/src/core/helpers.mjs index 30cb5d52e..8af2f1323 100644 --- a/src/core/helpers.mjs +++ b/src/core/helpers.mjs @@ -33,7 +33,7 @@ export function sleep (ms) { }) } -export function parseNodeIDs (input) { +export function parseNodeIds (input) { if (typeof input === 'string') { const nodeIds = [] input.split(',').forEach(item => { @@ -164,3 +164,9 @@ export function backupOldPemKeys (nodeIds, keysDir, curDate = new Date(), dirPre return backupDir } + +export function isNumeric 
(str) { + if (typeof str !== 'string') return false // we only process strings! + return !isNaN(str) && // use type coercion to parse the _entirety_ of the string (`parseFloat` alone does not do this)... + !isNaN(parseFloat(str)) // ...and ensure strings of whitespace fail +} diff --git a/src/core/index.mjs b/src/core/index.mjs index 3026e9536..6a0ca8c4b 100644 --- a/src/core/index.mjs +++ b/src/core/index.mjs @@ -26,11 +26,14 @@ import { ChartManager } from './chart_manager.mjs' import { ConfigManager } from './config_manager.mjs' import { KeyManager } from './key_manager.mjs' import { Keytool } from './keytool.mjs' +import { ProfileManager } from './profile_manager.mjs' +import * as helpers from './helpers.mjs' // Expose components from the core module export { logging, constants, + helpers, Helm, K8, PackageDownloader, @@ -40,5 +43,6 @@ export { ChartManager, ConfigManager, KeyManager, - Keytool + Keytool, + ProfileManager } diff --git a/src/core/k8.mjs b/src/core/k8.mjs index 40979abd5..6a11e3859 100644 --- a/src/core/k8.mjs +++ b/src/core/k8.mjs @@ -25,6 +25,7 @@ import * as sb from 'stream-buffers' import * as tar from 'tar' import { v4 as uuid4 } from 'uuid' import { V1ObjectMeta, V1Secret } from '@kubernetes/client-node' +import { sleep } from './helpers.mjs' import { constants } from './index.mjs' /** @@ -34,6 +35,8 @@ import { constants } from './index.mjs' * For parallel execution, create separate instances by invoking clone() */ export class K8 { + static PodReadyCondition = new Map().set(constants.POD_CONDITION_READY, constants.POD_CONDITION_STATUS_TRUE) + constructor (configManager, logger) { if (!configManager) throw new MissingArgumentError('An instance of core/ConfigManager is required') if (!logger) throw new MissingArgumentError('An instance of core/Logger is required') @@ -720,6 +723,27 @@ export class K8 { }) } + async recyclePodByLabels (podLabels, maxAttempts = 50) { + const podArray = await this.getPodsByLabel(podLabels) + for (const pod of podArray) { + const podName = pod.metadata.name + await this.kubeClient.deleteNamespacedPod(podName, this.configManager.getFlag(flags.namespace)) + } + + let attempts = 0 + while (attempts++ < maxAttempts) { + const status = await this.waitForPod(constants.POD_STATUS_RUNNING, podLabels) + if (status) { + const newPods = await this.getPodsByLabel(podLabels) + if (newPods.length === podArray.length) return newPods + } + + await sleep(2000) + } + + throw new FullstackTestingError(`pods are not running after deletion with labels [${podLabels.join(',')}]`) + } + /** * Wait for pod * @param status phase of the pod @@ -727,7 +751,7 @@ export class K8 { * @param podCount number of pod expected * @param maxAttempts maximum attempts to check * @param delay delay between checks in milliseconds - * @return {Promise} + * @return {Promise<[*]>} */ async waitForPod (status = 'Running', labels = [], podCount = 1, maxAttempts = 10, delay = 500) { const ns = this._getNamespace() @@ -755,13 +779,76 @@ export class K8 { this.logger.debug(`${resp.body.items.length}/${podCount} pod found [namespace:${ns}, fieldSector(${fieldSelector}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`) if (resp.body && resp.body.items && resp.body.items.length === podCount) { - return resolve(true) + return resolve(resp.body.items) + } + + if (attempts++ < maxAttempts) { + setTimeout(check, delay) + } else { + return reject(new FullstackTestingError(`Expected number of pod (${podCount}) not found ${fieldSelector} ${labelSelector} [attempts = 
${attempts}/${maxAttempts}]`)) + } + } + + check() + }) + } + + async waitForPodReady (labels = [], podCount = 1, maxAttempts = 10, delay = 500) { + return this.waitForPodCondition(K8.PodReadyCondition, labels, podCount, maxAttempts, delay) + } + + async waitForPodCondition ( + conditionsMap, + labels = [], + podCount = 1, maxAttempts = 10, delay = 500) { + if (!conditionsMap || conditionsMap.size === 0) throw new MissingArgumentError('pod conditions are required') + const ns = this._getNamespace() + const labelSelector = labels.join(',') + + this.logger.debug(`WaitForCondition [namespace:${ns}, conditions = ${conditionsMap.toString()} labelSelector: ${labelSelector}], maxAttempts: ${maxAttempts}`) + + return new Promise((resolve, reject) => { + let attempts = 0 + + const check = async () => { + this.logger.debug(`Checking for pod ready [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`) + + // wait for the pod to be available with the given status and labels + const pods = await this.waitForPod(constants.POD_STATUS_RUNNING, labels, podCount, maxAttempts, delay) + this.logger.debug(`${pods.length}/${podCount} pod found [namespace:${ns}, labelSelector: ${labelSelector}] [attempt: ${attempts}/${maxAttempts}]`) + + if (pods.length >= podCount) { + const podWithMatchedCondition = [] + + // check conditions + for (const pod of pods) { + let matchedCondition = 0 + for (const cond of pod.status.conditions) { + for (const entry of conditionsMap.entries()) { + const condType = entry[0] + const condStatus = entry[1] + if (cond.type === condType && cond.status === condStatus) { + this.logger.debug(`Pod condition met for ${pod.metadata.name} [type: ${cond.type} status: ${cond.status}]`) + matchedCondition++ + } + } + + if (matchedCondition >= conditionsMap.size) { + podWithMatchedCondition.push(pod) + break + } + } + } + + if (podWithMatchedCondition.length >= podCount) { + return resolve(podWithMatchedCondition) + } } if (attempts++ < maxAttempts) { setTimeout(check, delay) } else { - reject(new FullstackTestingError(`Expected number of pod (${podCount}) not found ${fieldSelector} ${labelSelector} [maxAttempts = ${maxAttempts}]`)) + return reject(new FullstackTestingError(`Pod not found with expected conditions [maxAttempts = ${maxAttempts}]`)) } } diff --git a/src/core/logging.mjs b/src/core/logging.mjs index f0bbeb2cf..2e55f3e61 100644 --- a/src/core/logging.mjs +++ b/src/core/logging.mjs @@ -142,7 +142,7 @@ export const Logger = class { } console.log(chalk.red('***********************************************************************************')) - this.error(err.message, { error: err.message, stacktrace: stack }) + this.debug(err.message, { error: err.message, stacktrace: stack }) } error (msg, ...args) { diff --git a/src/core/profile_manager.mjs b/src/core/profile_manager.mjs new file mode 100644 index 000000000..34deb19b2 --- /dev/null +++ b/src/core/profile_manager.mjs @@ -0,0 +1,317 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
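Reviewer note: `waitForPodReady` is `waitForPodCondition` pre-loaded with a `Ready=True` condition map, so callers can wait on any pod condition by passing their own map. A hypothetical call shape, assuming a constructed `K8` instance and an example label selector:

```
// Sketch: wait until two pods matching the labels report both PodScheduled=True and Ready=True.
async function waitForRestPods (k8) {
  const conditions = new Map()
    .set('PodScheduled', 'True')
    .set('Ready', 'True')

  await k8.waitForPodCondition(conditions, ['app.kubernetes.io/name=rest'], 2, 900, 2000)

  // the common case is the Ready=True shortcut:
  await k8.waitForPodReady(['app.kubernetes.io/name=rest'], 2, 900, 2000)
}
```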
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import fs from 'fs' +import path from 'path' +import { FullstackTestingError, IllegalArgumentError, MissingArgumentError } from './errors.mjs' +import * as yaml from 'js-yaml' +import { flags } from '../commands/index.mjs' +import { constants, helpers } from './index.mjs' + +const resourceRequestTypes = ['requests', 'limits'] +const hardwareTypes = ['cpu', 'memory'] +const consensusSidecars = [ + 'recordStreamUploader', 'eventStreamUploader', 'backupUploader', 'accountBalanceUploader', 'otelCollector'] + +export class ProfileManager { + /** + * Constructor + * @param logger an instance of core/Logger + * @param configManager an instance of core/ConfigManager + * @param cacheDir cache directory where the values file will be written. A yaml file named .yaml is created. + */ + constructor (logger, configManager, cacheDir = constants.SOLO_VALUES_DIR) { + if (!logger) throw new MissingArgumentError('An instance of core/Logger is required') + if (!configManager) throw new MissingArgumentError('An instance of core/ConfigManager is required') + + this.logger = logger + this.configManager = configManager + this.profiles = new Map() + + cacheDir = path.resolve(cacheDir) + this.cacheDir = cacheDir + } + + loadProfiles (forceReload = false) { + const profileFile = this.configManager.getFlag(flags.profileFile) + if (!profileFile) throw new MissingArgumentError('profileFile is required') + + // return the cached value as quickly as possible + if (this.profiles && this.profileFile === profileFile && !forceReload) { + return this.profiles + } + + if (!fs.existsSync(profileFile)) throw new IllegalArgumentError(`profileFile does not exist: ${profileFile}`) + + // load profile file + this.profiles = new Map() + const yamlData = fs.readFileSync(profileFile, 'utf8') + const profileItems = yaml.load(yamlData) + + // add profiles + for (const key in profileItems) { + let profile = profileItems[key] + profile = profile || {} + this.profiles.set(key, profile) + } + + this.profileFile = profileFile + return this.profiles + } + + getProfile (profileName) { + if (!profileName) throw new MissingArgumentError('profileName is required') + if (!this.profiles || this.profiles.size <= 0) { + this.loadProfiles() + } + + if (!this.profiles || !this.profiles.has(profileName)) throw new IllegalArgumentError(`Profile does not exists with name: ${profileName}`) + return this.profiles.get(profileName) + } + + /** + * Set value in the yaml object + * @param itemPath item path in the yaml + * @param value value to be set + * @param yamlRoot root of the yaml object + * @return {*} + * @private + */ + _setValue (itemPath, value, yamlRoot) { + // find the location where to set the value in the yaml + const itemPathParts = itemPath.split('.') + let parent = yamlRoot + let current = parent + let prevItemPath = '' + for (let itemPathPart of itemPathParts) { + if (helpers.isNumeric(itemPathPart)) { + itemPathPart = Number.parseInt(itemPathPart) // numeric path part can only be array index i.e. 
an integer + if (!Array.isArray(parent[prevItemPath])) { + parent[prevItemPath] = [] + } + + if (!parent[prevItemPath][itemPathPart]) { + parent[prevItemPath][itemPathPart] = {} + } + + parent = parent[prevItemPath] + prevItemPath = itemPathPart + current = parent[itemPathPart] + } else { + if (!current[itemPathPart]) { + current[itemPathPart] = {} + } + + parent = current + prevItemPath = itemPathPart + current = parent[itemPathPart] + } + } + + parent[prevItemPath] = value + return yamlRoot + } + + /** + * Set resources for the chart + * @param itemPath item path in the yaml + * @param itemResources item resources object + * @param yamlRoot root of the yaml object + * @private + */ + _setChartResources (itemPath, itemResources, yamlRoot) { + if (!itemResources || !itemResources.resources) return + + for (const resourceRequestType of resourceRequestTypes) { + if (itemResources && itemResources.resources[resourceRequestType]) { + const resources = itemResources.resources[resourceRequestType] + for (const hardware of hardwareTypes) { + if (resources[hardware] !== undefined) { + if (itemPath) { + this._setValue(`${itemPath}.resources.${resourceRequestType}.${hardware}`, resources[hardware], yamlRoot) + } else { + this._setValue(`resources.${resourceRequestType}.${hardware}`, resources[hardware], yamlRoot) + } + } + } + } + } + } + + resourcesForConsensusPod (profile, nodeIds, yamlRoot) { + if (!profile) throw new MissingArgumentError('profile is required') + + // prepare name and account IDs for nodes + const realm = constants.HEDERA_NODE_ACCOUNT_ID_START.realm + const shard = constants.HEDERA_NODE_ACCOUNT_ID_START.shard + let accountId = constants.HEDERA_NODE_ACCOUNT_ID_START.num + + // set consensus pod level resources + for (let nodeIndex = 0; nodeIndex < nodeIds.length; nodeIndex++) { + this._setValue(`hedera.nodes.${nodeIndex}.name`, nodeIds[nodeIndex], yamlRoot) + this._setValue(`hedera.nodes.${nodeIndex}.accountId`, `${realm}.${shard}.${accountId++}`, yamlRoot) + this._setChartResources(`hedera.nodes.${nodeIndex}`, profile.consensus, yamlRoot) + } + + if (profile.consensus) { + // set default for consensus pod + this._setChartResources('defaults.root', profile.consensus.root, yamlRoot) + + // set sidecar resources + for (const sidecar of consensusSidecars) { + this._setChartResources(`defaults.sidecars.${sidecar}`, profile.consensus[sidecar], yamlRoot) + } + } + + return yamlRoot + } + + resourcesForHaProxyPod (profile, yamlRoot) { + if (!profile) throw new MissingArgumentError('profile is required') + if (!profile.haproxy) return // use chart defaults + + return this._setChartResources('defaults.haproxy', profile.haproxy, yamlRoot) + } + + resourcesForEnvoyProxyPod (profile, yamlRoot) { + if (!profile) throw new MissingArgumentError('profile is required') + if (!profile.envoyProxy) return // use chart defaults + return this._setChartResources('defaults.envoyProxy', profile.envoyProxy, yamlRoot) + } + + resourcesForHederaExplorerPod (profile, yamlRoot) { + if (!profile) throw new MissingArgumentError('profile is required') + if (!profile.explorer) return + return this._setChartResources('hedera-explorer', profile.explorer, yamlRoot) + } + + resourcesForMinioTenantPod (profile, yamlRoot) { + if (!profile) throw new MissingArgumentError('profile is required') + if (!profile.minio || !profile.minio.tenant) return // use chart defaults + + for (const poolIndex in profile.minio.tenant.pools) { + const pool = profile.minio.tenant.pools[poolIndex] + for (const prop in pool) { + if (prop !== 
'resources') { + this._setValue(`minio-server.tenant.pools.${poolIndex}.${prop}`, pool[prop], yamlRoot) + } + } + + this._setChartResources(`minio-server.tenant.pools.${poolIndex}`, pool, yamlRoot) + } + + return yamlRoot + } + + /** + * Prepare a values file for FST Helm chart + * @param profileName resource profile name + * @return {Promise} return the full path to the values file + */ + prepareValuesForFstChart (profileName) { + if (!profileName) throw new MissingArgumentError('profileName is required') + const profile = this.getProfile(profileName) + + const nodeIds = helpers.parseNodeIds(this.configManager.getFlag(flags.nodeIDs)) + if (!nodeIds) throw new FullstackTestingError('Node IDs are not set in the config') + + // generate the yaml + const yamlRoot = {} + this.resourcesForConsensusPod(profile, nodeIds, yamlRoot) + this.resourcesForHaProxyPod(profile, yamlRoot) + this.resourcesForEnvoyProxyPod(profile, yamlRoot) + this.resourcesForMinioTenantPod(profile, yamlRoot) + + // write the yaml + const cachedValuesFile = path.join(this.cacheDir, `fst-${profileName}.yaml`) + return new Promise((resolve, reject) => { + fs.writeFile(cachedValuesFile, yaml.dump(yamlRoot), (err) => { + if (err) { + reject(err) + } + + resolve(cachedValuesFile) + }) + }) + } + + /** + * Prepare a values file for rpc-relay Helm chart + * @param profileName resource profile name + * @return {Promise} return the full path to the values file + */ + prepareValuesForRpcRelayChart (profileName) { + if (!profileName) throw new MissingArgumentError('profileName is required') + const profile = this.getProfile(profileName) + if (!profile.rpcRelay) return Promise.resolve()// use chart defaults + + // generate the yaml + const yamlRoot = {} + this._setChartResources('', profile.rpcRelay, yamlRoot) + + // write the yaml + const cachedValuesFile = path.join(this.cacheDir, `rpcRelay-${profileName}.yaml`) + return new Promise((resolve, reject) => { + fs.writeFile(cachedValuesFile, yaml.dump(yamlRoot), (err) => { + if (err) { + reject(err) + } + + resolve(cachedValuesFile) + }) + }) + } + + /** + * Prepare a values file for mirror-node Helm chart + * @param profileName resource profile name + * @return {Promise} return the full path to the values file + */ + prepareValuesForMirrorNodeChart (profileName) { + if (!profileName) throw new MissingArgumentError('profileName is required') + const profile = this.getProfile(profileName) + if (!profile.mirror) return Promise.resolve() // use chart defaults + + // generate the yaml + const yamlRoot = {} + if (profile.mirror.postgresql) { + if (profile.mirror.postgresql.persistence) { + this._setValue('hedera-mirror-node.postgresql.persistence.size', profile.mirror.postgresql.persistence.size, yamlRoot) + } + + this._setChartResources('hedera-mirror-node.postgresql.postgresql', profile.mirror.postgresql.postgresql, yamlRoot) + } + + this._setChartResources('hedera-mirror-node.importer', profile.mirror.importer, yamlRoot) + this._setChartResources('hedera-mirror-node.rest', profile.mirror.rest, yamlRoot) + this._setChartResources('hedera-mirror-node.web3', profile.mirror.web3, yamlRoot) + this._setChartResources('hedera-mirror-node.grpc', profile.mirror.grpc, yamlRoot) + this._setChartResources('hedera-mirror-node.monitor', profile.mirror.monitor, yamlRoot) + this.resourcesForHederaExplorerPod(profile, yamlRoot) + + // write the yaml + const cachedValuesFile = path.join(this.cacheDir, `mirror-${profileName}.yaml`) + return new Promise((resolve, reject) => { + fs.writeFile(cachedValuesFile, 
yaml.dump(yamlRoot), (err) => { + if (err) { + reject(err) + } + + resolve(cachedValuesFile) + }) + }) + } +} diff --git a/src/core/templates.mjs b/src/core/templates.mjs index 13cc12ec3..39a7fe52e 100644 --- a/src/core/templates.mjs +++ b/src/core/templates.mjs @@ -25,12 +25,8 @@ export class Templates { return `network-${nodeId}-0` } - static renderNodeSvcName (nodeId) { - return `network-${nodeId}-svc` - } - static renderNetworkSvcName (nodeId) { - return `network-${nodeId}-svc` + return `network-${nodeId}` } /** diff --git a/src/index.mjs b/src/index.mjs index 83d04dee1..16789c94a 100644 --- a/src/index.mjs +++ b/src/index.mjs @@ -27,7 +27,7 @@ import { PlatformInstaller, Helm, logging, - KeyManager, Zippy, constants + KeyManager, Zippy, constants, ProfileManager } from './core/index.mjs' import 'dotenv/config' import { K8 } from './core/k8.mjs' @@ -54,6 +54,7 @@ export function main (argv) { const platformInstaller = new PlatformInstaller(logger, k8) const keyManager = new KeyManager(logger) const accountManager = new AccountManager(logger, k8) + const profileManager = new ProfileManager(logger, configManager) // set cluster and namespace in the global configManager from kubernetes context // so that we don't need to prompt the user @@ -72,7 +73,8 @@ export function main (argv) { depManager, keyManager, accountManager, - keytoolDepManager + keytoolDepManager, + profileManager } const processArguments = (argv, yargs) => { diff --git a/test/data/test-profiles.yaml b/test/data/test-profiles.yaml new file mode 100644 index 000000000..3357aed81 --- /dev/null +++ b/test/data/test-profiles.yaml @@ -0,0 +1,148 @@ +test: + consensus: + root: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + recordStreamUploader: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 150m + memory: 250Mi + eventStreamUploader: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 150m + memory: 250Mi + accountBalanceUploader: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 150m + memory: 250Mi + backupUploader: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 150m + memory: 250Mi + otelCollector: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + rpcRelay: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + haproxy: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 1000Mi + envoyProxy: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 500Mi + explorer: + resources: + requests: + cpu: 200m + memory: 100Mi + limits: + cpu: 1000m + memory: 250Mi + mirror: + postgresql: + persistence: + size: 250Gi + postgresql: + resources: + requests: + cpu: 1000m + memory: 1250Mi + limits: + cpu: 1500m + memory: 2000Mi + importer: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 1000Mi + rest: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + grpc: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + web3: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + monitor: + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 250m + memory: 500Mi + minio: + tenant: + pools: + - servers: 1 + name: pool-1 + volumesPerServer: 1 + size: 10G #volume size + resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 1000m + memory: 2000Mi diff --git a/test/e2e/commands/cluster.test.mjs 
b/test/e2e/commands/cluster.test.mjs index 955bca8e2..0fe1c6f18 100644 --- a/test/e2e/commands/cluster.test.mjs +++ b/test/e2e/commands/cluster.test.mjs @@ -20,7 +20,7 @@ import { beforeEach, describe, expect, - it + it, jest } from '@jest/globals' import { bootstrapTestVariables, @@ -28,13 +28,18 @@ import { TEST_CLUSTER } from '../../test_util.js' import { - constants + constants, + logging } from '../../../src/core/index.mjs' import { flags } from '../../../src/commands/index.mjs' import { sleep } from '../../../src/core/helpers.mjs' import * as version from '../../../version.mjs' describe('ClusterCommand', () => { + // mock showUser and showJSON to silent logging during tests + jest.spyOn(logging.Logger.prototype, 'showUser').mockImplementation() + jest.spyOn(logging.Logger.prototype, 'showJSON').mockImplementation() + const testName = 'cluster-cmd-e2e' const namespace = testName const argv = getDefaultArgv() @@ -75,7 +80,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.reset(argv)).resolves.toBeTruthy() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } } @@ -89,7 +93,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.setup(argv)).rejects.toThrowError('Error on cluster setup') } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) @@ -102,7 +105,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.setup(argv)).resolves.toBeTruthy() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) @@ -111,7 +113,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.getClusterInfo()).resolves.toBeTruthy() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) @@ -120,7 +121,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.showClusterList()).resolves.toBeTruthy() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) @@ -129,7 +129,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.showInstalledChartList()).resolves.toBeUndefined() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) @@ -156,7 +155,6 @@ describe('ClusterCommand', () => { try { await expect(clusterCmd.reset(argv)).resolves.toBeTruthy() } catch (e) { - clusterCmd.logger.showUserError(e) expect(e).toBeNull() } }, 60000) diff --git a/test/e2e/core/account_manager.test.mjs b/test/e2e/core/account_manager.test.mjs index 30d6e017e..406c21e97 100644 --- a/test/e2e/core/account_manager.test.mjs +++ b/test/e2e/core/account_manager.test.mjs @@ -53,4 +53,10 @@ describe('AccountManager', () => { expect(accountManager._portForwards.length).toStrictEqual(0) }) + + it('should be able to load a new client', async () => { + await accountManager.loadNodeClient(configManager.getFlag(flags.namespace)) + expect(accountManager._nodeClient).not.toBeNull() + await accountManager.close() + }) }) diff --git a/test/e2e/core/k8_e2e.test.mjs b/test/e2e/core/k8_e2e.test.mjs index d21a9b586..92db39775 100644 --- a/test/e2e/core/k8_e2e.test.mjs +++ b/test/e2e/core/k8_e2e.test.mjs @@ -117,14 +117,35 @@ describe('K8', () => { }) }) - it('should be able to run watch for pod', async () => { - const nodeId = 'node0' + it('should be able to run wait for pod', async () => { const labels = [ - 'fullstack.hedera.com/type=network-node', - `fullstack.hedera.com/node-name=${nodeId}` + 'fullstack.hedera.com/type=network-node' ] - await expect(k8.waitForPod(constants.POD_STATUS_RUNNING, 
labels)).resolves.toBeTruthy() + const pods = await k8.waitForPod(constants.POD_STATUS_RUNNING, labels, 3) + expect(pods.length).toStrictEqual(3) + }) + + it('should be able to run wait for pod ready', async () => { + const labels = [ + 'fullstack.hedera.com/type=network-node' + ] + + const pods = await k8.waitForPodReady(labels, 3) + expect(pods.length).toStrictEqual(3) + }) + + it('should be able to run wait for pod conditions', async () => { + const labels = [ + 'fullstack.hedera.com/type=network-node' + ] + + const conditions = new Map() + .set(constants.POD_CONDITION_INITIALIZED, constants.POD_CONDITION_STATUS_TRUE) + .set(constants.POD_CONDITION_POD_SCHEDULED, constants.POD_CONDITION_STATUS_TRUE) + .set(constants.POD_CONDITION_READY, constants.POD_CONDITION_STATUS_TRUE) + const pods = await k8.waitForPodCondition(conditions, labels, 3) + expect(pods.length).toStrictEqual(3) }) it('should be able to cat a log file inside the container', async () => { @@ -148,4 +169,11 @@ describe('K8', () => { const pvcs = await k8.listPvcsByNamespace(k8._getNamespace()) expect(pvcs.length).toBeGreaterThan(0) }) + + it('should be able to recycle pod by labels', async () => { + const podLabels = ['app=haproxy-node0', 'fullstack.hedera.com/type=haproxy'] + const podArray1 = await k8.getPodsByLabel(podLabels) + const podsArray2 = await k8.recyclePodByLabels(podLabels) + expect(podsArray2.length >= podArray1.length).toBeTruthy() + }, 60000) }) diff --git a/test/e2e/setup-e2e.sh b/test/e2e/setup-e2e.sh index 8daf44ccb..9d9988a16 100755 --- a/test/e2e/setup-e2e.sh +++ b/test/e2e/setup-e2e.sh @@ -13,7 +13,7 @@ kind create cluster -n "${SOLO_CLUSTER_NAME}" --image "${KIND_IMAGE}" || exit 1 # Most of the e2e test should bootstrap its own network in its own namespace. However, some tests can use this as a # shared resource if required. 
# ********************************************************************************************************************** -solo init --namespace "${SOLO_NAMESPACE}" -i node0,node1,node2 -t v0.42.5 -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" --fst-chart-version v0.22.0 --dev || exit 1 # cache args for subsequent commands +solo init --namespace "${SOLO_NAMESPACE}" -i node0,node1,node2 -t v0.47.0 -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" --dev || exit 1 # cache args for subsequent commands solo cluster setup || exit 1 helm list --all-namespaces solo network deploy || exit 1
diff --git a/test/test_util.js b/test/test_util.js index 449c51bb5..f783f6910 100644 --- a/test/test_util.js +++ b/test/test_util.js @@ -39,7 +39,7 @@ import { KeyManager, logging, PackageDownloader, - PlatformInstaller, + PlatformInstaller, ProfileManager, Zippy } from '../src/core/index.mjs' @@ -115,6 +115,7 @@ export function bootstrapTestVariables (testName, argv, const k8 = k8Arg || new K8(configManager, testLogger) const platformInstaller = new PlatformInstaller(testLogger, k8) const accountManager = new AccountManager(testLogger, k8, constants) + const profileManager = new ProfileManager(testLogger, configManager) const opts = { logger: testLogger, helm, @@ -127,7 +128,8 @@ export function bootstrapTestVariables (testName, argv, keyManager, accountManager, cacheDir, - keytoolDepManager + keytoolDepManager, + profileManager } const initCmd = initCmdArg || new InitCommand(opts) @@ -193,7 +195,7 @@ export function bootstrapNetwork (testName, argv, it('should succeed with network deploy', async () => { await networkCmd.deploy(argv) - }, 60000) + }, 120000) it('should succeed with node setup command', async () => { expect.assertions(1)
diff --git a/test/unit/core/helm.test.mjs b/test/unit/core/helm.test.mjs index a22fb3614..6436e7261 100644 --- a/test/unit/core/helm.test.mjs +++ b/test/unit/core/helm.test.mjs @@ -30,7 +30,7 @@ describe.each([ it('should run helm install', async () => { await helm.install('arg') - expect(shellSpy).toHaveBeenCalledWith(`${helmPath} install arg`, true) + expect(shellSpy).toHaveBeenCalledWith(`${helmPath} install arg`) }) it('should run helm uninstall', async () => {
diff --git a/test/unit/core/helpers.test.mjs b/test/unit/core/helpers.test.mjs index 8cccafbf7..64024d639 100644 --- a/test/unit/core/helpers.test.mjs +++ b/test/unit/core/helpers.test.mjs @@ -32,7 +32,7 @@ describe('Helpers', () => { output: ['node0', 'node2'] } ])('should be able to parse node ID', (t) => { - expect(helpers.parseNodeIDs(t.input)).toStrictEqual(t.output) + expect(helpers.parseNodeIds(t.input)).toStrictEqual(t.output) }) it.each([
diff --git a/test/unit/core/profile_manager.test.mjs b/test/unit/core/profile_manager.test.mjs new file mode 100644 index 000000000..2a1c7b346 --- /dev/null +++ b/test/unit/core/profile_manager.test.mjs @@ -0,0 +1,124 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ +import { afterAll, describe, expect, it } from '@jest/globals' +import fs from 'fs' +import * as yaml from 'js-yaml' +import path from 'path' +import { flags } from '../../../src/commands/index.mjs' +import { ConfigManager, ProfileManager } from '../../../src/core/index.mjs' +import { getTmpDir, testLogger } from '../../test_util.js' + +const tmpDir = getTmpDir() +const configFile = path.join(tmpDir, 'resource-manager.config') +const configManager = new ConfigManager(testLogger, configFile) +const profileManager = new ProfileManager(testLogger, configManager, tmpDir) +configManager.setFlag(flags.nodeIDs, 'node0,node1,node3') +const testProfileFile = path.resolve('test/data/test-profiles.yaml') + +describe('ProfileManager', () => { + afterAll(() => { + fs.rmSync(tmpDir, { recursive: true }) + }) + + it('should throw error for missing profile file', () => { + expect.assertions(1) + try { + configManager.setFlag(flags.profileFile, 'INVALID') + profileManager.loadProfiles(true) + } catch (e) { + expect(e.message.includes('profileFile does not exist')).toBeTruthy() + } + }) + + it('should be able to load a profile file', () => { + configManager.setFlag(flags.profileFile, testProfileFile) + const profiles = profileManager.loadProfiles(true) + expect(profiles).not.toBeNull() + for (const entry of profiles) { + const profile = entry[1] + expect(profile).not.toBeNull() + for (const component of ['consensus', 'rpcRelay', 'haproxy', 'envoyProxy', 'explorer', 'mirror', 'minio']) { + expect(profile[component] !== undefined).toBeTruthy() + } + } + }) + + describe.each([ + { profileName: 'test', profileFile: testProfileFile } + ])('determine chart values for a profile', (input) => { + it(`should determine FST chart values [profile = ${input.profileName}]`, async () => { + configManager.setFlag(flags.profileFile, input.profileFile) + profileManager.loadProfiles(true) + const valuesFile = await profileManager.prepareValuesForFstChart(input.profileName) + expect(valuesFile).not.toBeNull() + expect(fs.existsSync(valuesFile)).toBeTruthy() + + // validate the yaml + const valuesYaml = yaml.load(fs.readFileSync(valuesFile)) + expect(valuesYaml.hedera.nodes.length).toStrictEqual(3) + expect(valuesYaml.defaults.root.resources.limits.cpu).not.toBeNull() + expect(valuesYaml.defaults.root.resources.limits.memory).not.toBeNull() + + // check all sidecars have resources + for (const component of + ['recordStreamUploader', 'eventStreamUploader', 'backupUploader', 'accountBalanceUploader', 'otelCollector']) { + expect(valuesYaml.defaults.sidecars[component].resources.limits.cpu).not.toBeNull() + expect(valuesYaml.defaults.sidecars[component].resources.limits.memory).not.toBeNull() + } + + // check proxies have resources + for (const component of ['haproxy', 'envoyProxy']) { + expect(valuesYaml.defaults[component].resources.limits.cpu).not.toBeNull() + expect(valuesYaml.defaults[component].resources.limits.memory).not.toBeNull() + } + + // check minio-tenant has resources + expect(valuesYaml['minio-server'].tenant.pools[0].resources.limits.cpu).not.toBeNull() + expect(valuesYaml['minio-server'].tenant.pools[0].resources.limits.memory).not.toBeNull() + }) + + it(`should determine mirror-node chart values [profile = ${input.profileName}]`, async () => { + configManager.setFlag(flags.profileFile, input.profileFile) + profileManager.loadProfiles(true) + const valuesFile = await profileManager.prepareValuesForMirrorNodeChart(input.profileName) + expect(fs.existsSync(valuesFile)).toBeTruthy() + + // validate yaml + 
const valuesYaml = yaml.load(fs.readFileSync(valuesFile)) + expect(valuesYaml['hedera-mirror-node'].postgresql.persistence.size).not.toBeNull() + expect(valuesYaml['hedera-mirror-node'].postgresql.postgresql.resources.limits.cpu).not.toBeNull() + expect(valuesYaml['hedera-mirror-node'].postgresql.postgresql.resources.limits.memory).not.toBeNull() + for (const component of ['grpc', 'rest', 'web3', 'importer']) { + expect(valuesYaml['hedera-mirror-node'][component].resources.limits.cpu).not.toBeNull() + expect(valuesYaml['hedera-mirror-node'][component].resources.limits.memory).not.toBeNull() + } + expect(valuesYaml['hedera-explorer'].resources.limits.cpu).not.toBeNull() + expect(valuesYaml['hedera-explorer'].resources.limits.memory).not.toBeNull() + }) + + it(`should determine rpc-relay chart values [profile = ${input.profileName}]`, async () => { + configManager.setFlag(flags.profileFile, input.profileFile) + profileManager.loadProfiles(true) + const valuesFile = await profileManager.prepareValuesForRpcRelayChart(input.profileName) + expect(fs.existsSync(valuesFile)).toBeTruthy() + // validate yaml + const valuesYaml = yaml.load(fs.readFileSync(valuesFile)) + expect(valuesYaml.resources.limits.cpu).not.toBeNull() + expect(valuesYaml.resources.limits.memory).not.toBeNull() + }) + }) +}) diff --git a/version.mjs b/version.mjs index e7fa297aa..a7ec6aa39 100644 --- a/version.mjs +++ b/version.mjs @@ -21,4 +21,5 @@ export const JAVA_VERSION = '21.0.1+12' export const HELM_VERSION = 'v3.14.2' -export const FST_CHART_VERSION = 'v0.24.0' +export const FST_CHART_VERSION = 'v0.24.2' +export const HEDERA_PLATFORM_VERSION = 'v0.47.0'
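
Usage sketch (illustrative only, not part of the patch): the new ProfileManager and K8.waitForPodReady APIs can be combined roughly as below, following the constructor and flag usage shown in the unit and e2e tests above. The wrapper function name, the temporary config path and cache directory, and the placeholder helm install step are assumptions; only the ConfigManager, ProfileManager and K8 calls come from this change.

import path from 'path'
import { ConfigManager, K8, ProfileManager } from './src/core/index.mjs'
import { flags } from './src/commands/index.mjs'

// Render chart values from a resource profile, then wait for the consensus node pods to report
// the Ready condition. Sketch only: assumes `logger` is a core/Logger instance and that a cluster
// matching the cached kube context is reachable (as prepared by test/e2e/setup-e2e.sh).
export async function renderValuesAndWaitForNodes (logger, profileFile, profileName = 'test') {
  const configManager = new ConfigManager(logger, path.join('/tmp', 'solo-sketch.config')) // hypothetical config path
  configManager.setFlag(flags.profileFile, profileFile)
  configManager.setFlag(flags.nodeIDs, 'node0,node1,node2')
  configManager.setFlag(flags.namespace, 'solo') // namespace used by K8 lookups (assumed value)

  // ProfileManager reads the cached flags and writes fst-<profileName>.yaml into its cache dir
  const profileManager = new ProfileManager(logger, configManager, '/tmp')
  const valuesFile = await profileManager.prepareValuesForFstChart(profileName)
  logger.showUser(`FST chart values written to ${valuesFile}`)

  // ... `helm install -f ${valuesFile} ...` would run here; chart installation is outside this sketch ...

  // waitForPodReady resolves with the matching pod objects once all of them report Ready
  const k8 = new K8(configManager, logger)
  const pods = await k8.waitForPodReady(['fullstack.hedera.com/type=network-node'], 3, 100, 1000)
  return pods.map(pod => pod.metadata.name)
}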