diff --git a/.github/workflows/integration-tests.yaml b/.github/workflows/integration-tests.yaml
index c2c93df5..480bb2b1 100644
--- a/.github/workflows/integration-tests.yaml
+++ b/.github/workflows/integration-tests.yaml
@@ -28,6 +28,24 @@ jobs:
          go-version: "${{ env.GOVERSION }}"
      - name: Setup direnv
        uses: HatsuneMiku3939/direnv-action@v1
+     - name: Checkout akash-api
+       run: |
+         # check out akash-api at the exact version pinned in go.mod
+         api_ver=$(go list -mod=readonly -m -f '{{ .Version }}' github.com/akash-network/akash-api)
+         pushd $(pwd)
+         cd ..
+         git clone https://github.com/akash-network/akash-api.git
+         cd akash-api
+         git checkout $api_ver
+         /opt/hostedtoolcache/direnv/2.32.1/x64/direnv allow
+         make modvendor
+         popd
+
+         # build a Go workspace that points akash-api at the local checkout
+         printf 'go ${{ env.GOVERSION }}\n\n' > go.work
+         printf 'use (\n\t.\n)\n\n' >> go.work
+         printf 'replace (\n\tgithub.com/akash-network/akash-api => ../akash-api\n)\n' >> go.work
+         go mod tidy
      - name: Fetch kind version from go modules
        run: echo "KIND_VERSION=$(go list -mod=readonly -m -f '{{ .Version }}' sigs.k8s.io/kind)" >> $GITHUB_ENV
      - name: Set up QEMU
@@ -53,6 +71,10 @@ jobs:
        kubectl config view
        kubectl cluster-info
        kubectl get pods,ingress,svc -A
+       make -s -C _run/kube kube-deployment-rollout-operator-inventory
+       __pod=$(kubectl -n akash-services get pods -l akash.network/component=operator -l akash.network/component=inventory -l app.kubernetes.io/name=operator-inventory-node --no-headers -o custom-columns=":metadata.name")
+       kubectl -n akash-services port-forward --address 0.0.0.0 pod/${__pod} 8444:8081 &
+       ./script/inventory-test.sh --host=localhost:8444 --mode=plaintext akash.inventory.v1.NodeRPC/QueryNode
      - name: Run E2E Tests
        run: make test-e2e-integration
      - name: Run K8s Tests
diff --git a/_docs/development-environment.md b/_docs/development-environment.md
new file mode 100644
index 00000000..c3eb38a7
--- /dev/null
+++ b/_docs/development-environment.md
@@ -0,0 +1,252 @@
+# Setting up the development environment
+
+**Warning**
+All links to the `provider` repo reference the `gpu` branch. As soon as `gpu` is merged into `main`, all links will need updating.
+
+This page covers setting up a development environment for both the [node](https://github.com/akash-network/node) and [provider](https://github.com/akash-network/provider) repositories.
+The `provider` repo hosts all of the scripts, as it depends on the `node` repo.
+If you already know what this guide is about, feel free to jump straight to the [examples](#how-to-use-runbook).
+
+## Code
+
+Check out the code, if you have not done so already, to a location of your convenience.
+In this example, repositories are located in `~/go/src/github.com/akash-network`.
+
+The checkout below assumes `git` is configured to use an SSH connection to GitHub:
+
+```shell
+cd ~/go/src/github.com/akash-network # all commands below assume this as the current directory
+git clone git@github.com:akash-network/node
+git clone git@github.com:akash-network/provider
+```
+
+## Requirements
+
+- `Go` must be installed. Development branches of both projects track the latest Go release; only the major and minor versions have to match.
+
+### Install tools
+
+Run the following script to install all system-wide tools.
+Currently supported host platforms:
+
+- MacOS
+- Debian-based OS
+
+PRs adding support for other hosts are welcome (except Windows).
+
+```shell
+./provider/script/install_dev_dependencies.sh
+```
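+
+When developing against unreleased `akash-api` changes, a Go workspace can point the provider build at a local `akash-api` checkout; the CI integration-tests workflow above does the same by writing `go.work` directly. A minimal sketch, assuming `akash-api` is cloned alongside `provider` (paths are illustrative):
+
+```shell
+cd ~/go/src/github.com/akash-network
+git clone git@github.com:akash-network/akash-api
+cd provider
+go work init .
+go work edit -replace github.com/akash-network/akash-api=../akash-api
+```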
+
+## How it works
+
+### General behaviour
+
+All examples are located within the [_run](https://github.com/akash-network/provider/blob/gpu/_run) directory.
+Commands are implemented as `make` targets.
+
+There are three ways to set up the k8s cluster:
+
+- kind
+- minikube
+- ssh
+
+Both `kind` and `minikube` are e2e, i.e. the configuration is capable of spinning up the cluster on the local host, whereas `ssh` expects the cluster to be configured before use.
+
+### Runbook
+
+There are four configuration variants, each presented as a directory within [_run](https://github.com/akash-network/provider/blob/gpu/_run).
+
+- `kube` - uses `kind` to set up a local cluster. It is widely used by the e2e testing of the provider. The provider and the node run as host services. All operators run as kubernetes deployments.
+- `single` - uses `kind` to set up a local cluster. The main difference is that both the node and the provider (and all operators) run within the k8s cluster as deployments. (At some point we will merge `single` with `kube` and call it `kind`.)
+- `minikube` - not in use for now
+- `ssh` - expects the cluster to be up and running. Mainly used to test sophisticated features like `GPU` or `IP leases`.
+
+The only difference between the environments above is how they are set up. Once running, all commands are the same.
+
+Running through the entire runbook requires multiple terminals.
+Each command is marked __t1__-__t3__ to indicate a suggested terminal number.
+
+If at any point something goes wrong and the cluster needs to be rebuilt from scratch:
+
+```shell
+cd _run/
+make kube-cluster-delete
+make clean
+```
+
+### Kustomize
+
+TBD
+
+#### Parameters
+
+| Name                 |                                      Default value                                      | Effective on target(s)                                                                                                                    | Notes |
+|:---------------------|:----------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------|:------|
+| `SKIP_BUILD`         |                                          `false`                                          |                                                                                                                                           |       |
+| `DSEQ`               |                                            `1`                                            | `deployment-*`<br/>`lease-*`<br/>`bid-*`<br/>`send-manifest`                                                                              |       |
+| `OSEQ`               |                                            `1`                                            | `deployment-*`<br/>`lease-*`<br/>`bid-*`<br/>`send-manifest`                                                                              |       |
+| `GSEQ`               |                                            `1`                                            | `deployment-*`<br/>`lease-*`<br/>`bid-*`<br/>`send-manifest`                                                                              |       |
+| `KUSTOMIZE_INSTALLS` | Depends on the runbook.<br/>Refer to each runbook's `Makefile` to see the default value | `kustomize-init`<br/>`kustomize-templates`<br/>`kustomize-set-images`<br/>`kustomize-configure-services`<br/>`kustomize-deploy-services` |       |
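+
+Parameters can be overridden per invocation as regular `make` variables. For example, to address a second deployment without touching the first one (the value `2` below is illustrative):
+
+```shell
+make deployment-create DSEQ=2
+make send-manifest DSEQ=2
+```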
+
+##### Keys
+
+Each configuration creates four [keys](https://github.com/akash-network/provider/blob/gpu/_run/common.mk#L40..L43).
+The keys are assigned to the targets, and under normal circumstances there is no need to alter them. However, this can be done by setting `KEY_NAME`:
+
+```shell
+# create provider from **provider** key
+make provider-create
+
+# create provider from custom key
+KEY_NAME=other make provider-create
+```
+
+#### How to use runbook
+
+##### Kube
+
+This runbook requires three terminals.
+
+1. Open the runbook
+
+   __all three terminals__
+   ```shell
+   cd _run/kube
+   ```
+
+2. Create and provision the local kind cluster.
+
+   __t1 run__
+   ```shell
+   make kube-cluster-setup
+   ```
+3. Start the akash node
+
+   __t2 run__
+   ```shell
+   make node-run
+   ```
+4. Create the provider
+
+   __t1 run__
+   ```shell
+   make provider-create
+   ```
+
+5. Start the provider
+
+   __t3 run__
+   ```shell
+   make provider-run
+   ```
+
+6. __t1__ Create a deployment. Check that the deployment was created and take note of the `dseq` - deployment sequence:
+
+   ```shell
+   make deployment-create
+   ```
+
+   ```shell
+   make query-deployments
+   ```
+
+   After a short time, you should see an order created for this deployment with the following command:
+
+   ```shell
+   make query-orders
+   ```
+
+   The Provider Services Daemon should see this order and bid on it:
+
+   ```shell
+   make query-bids
+   ```
+
+7. __t1__ When a bid has been created, you may create a lease.
+
+   To create a lease, run:
+
+   ```shell
+   make lease-create
+   ```
+
+   You can see the lease with:
+
+   ```shell
+   make query-leases
+   ```
+
+   You should now see "pending" inventory in the provider status:
+
+   ```shell
+   make provider-status
+   ```
+
+8. __t1__ Distribute the manifest
+
+   Now that you have a lease with a provider, you need to send your
+   workload configuration to that provider by sending it the manifest:
+
+   ```shell
+   make send-manifest
+   ```
+
+   You can check the status of your deployment with:
+
+   ```shell
+   make provider-lease-status
+   ```
+
+   You can reach your app with the following (Note: `Host:` header tomfoolery abounds):
+
+   ```shell
+   make provider-lease-ping
+   ```
+
+9. __t1__ Get the service status
+
+   ```sh
+   make provider-lease-status
+   ```
+
+   Fetch logs from the deployed service (all pods):
+
+   ```sh
+   make provider-lease-logs
+   ```
+
+##### Kube for e2e tests
+
+This runbook requires two terminals.
+
+1. Open the runbook
+
+   __t1__
+   ```shell
+   cd _run/kube
+   ```
+
+2. Create and provision the local kind cluster for e2e testing.
+
+   __t1 run__
+   ```shell
+   make kube-cluster-setup-e2e
+   ```
+
+3. Run the e2e tests
+
+   ```shell
+   make test-e2e-integration
+   ```
+
+##### Single
+
+TBD
+
+##### SSH
+
+TBD
diff --git a/_docs/kustomize/akash-operator-discovery/daemonset.yaml b/_docs/kustomize/akash-operator-discovery/daemonset.yaml
deleted file mode 100644
index 5a4a8711..00000000
--- a/_docs/kustomize/akash-operator-discovery/daemonset.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: operator-discovery
-  namespace: akash-services
-  labels:
-    akash.network: "true"
-    app.kubernetes.io/name: akash
-    app.kubernetes.io/instance: discovery
-    app.kubernetes.io/component: operator
-spec:
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: akash
-      app.kubernetes.io/instance: discovery
-      app.kubernetes.io/component: operator
-  replicas: 1
-  revisionHistoryLimit: 1
-  template:
-    metadata:
-      labels:
-        app: inventory-operator
-        app.kubernetes.io/name: akash
-        app.kubernetes.io/instance: discovery
-        app.kubernetes.io/component: operator
-    spec:
-      serviceAccountName: operator-discovery
-      containers:
-        - name: operator-discovery
-          image: ghcr.io/akash-network/provider
-          args:
-            - "provider-services"
-            - "operator"
-            - "inventory"
-          imagePullPolicy: IfNotPresent
-          resources:
-            limits:
-              cpu: 500m
-              memory: 512Mi
-            requests:
-              cpu: 100m
-              memory: 128Mi
-          ports:
-            - containerPort: 8080
-              name: api
-              protocol: TCP
diff --git a/_docs/kustomize/akash-operator-discovery/kustomization.yaml b/_docs/kustomize/akash-operator-discovery/kustomization.yaml
deleted file mode 100644
index fd79eef6..00000000
--- a/_docs/kustomize/akash-operator-discovery/kustomization.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: akash-services
-resources:
-  - deployment.yaml
-  - service.yaml
-  - rbac.yaml
-  - service_account.yaml
-  - cluster_role.yaml
-  - role-binding.yaml
diff --git a/_docs/kustomize/akash-operator-discovery/rbac.yaml b/_docs/kustomize/akash-operator-discovery/rbac.yaml
deleted file mode 100644
index ea1146b9..00000000
--- a/_docs/kustomize/akash-operator-discovery/rbac.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-#apiVersion: rbac.authorization.k8s.io/v1
-#kind: ClusterRoleBinding
-#metadata:
-#  name: akash-ip-operator-manage-service
-#subjects:
-#  - kind: ServiceAccount
-#    name: akash-ip-operator
-#    namespace: akash-services
-#roleRef:
-#  kind: ClusterRole
-#  name: akash-ip-op-manage-service
-#  apiGroup: rbac.authorization.k8s.io
diff --git a/_docs/kustomize/akash-operator-discovery/role-binding.yaml b/_docs/kustomize/akash-operator-discovery/role-binding.yaml
deleted file mode 100644
index e42e451c..00000000
--- a/_docs/kustomize/akash-operator-discovery/role-binding.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: operator-discovery
-  labels:
-    akash.network: "true"
-    app.kubernetes.io/name: akash
-    app.kubernetes.io/instance: discovery
-    app.kubernetes.io/component: operator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: operator-discovery
-subjects:
-  - kind: ServiceAccount
-    name: operator-discovery
-    namespace: akash-services
diff --git a/_docs/kustomize/akash-operator-discovery/service_account.yaml b/_docs/kustomize/akash-operator-discovery/service_account.yaml
deleted file mode 100644
index f6d9befa..00000000
--- a/_docs/kustomize/akash-operator-discovery/service_account.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: operator-discovery
-  namespace: akash-services
-  labels:
- akash.network: "true" - app.kubernetes.io/name: akash - app.kubernetes.io/instance: discovery - app.kubernetes.io/component: operator -automountServiceAccountToken: true diff --git a/_docs/kustomize/akash-operator-discovery/cluster_role.yaml b/_docs/kustomize/akash-operator-inventory/cluster-roles.yaml similarity index 61% rename from _docs/kustomize/akash-operator-discovery/cluster_role.yaml rename to _docs/kustomize/akash-operator-inventory/cluster-roles.yaml index fe972d19..97554796 100644 --- a/_docs/kustomize/akash-operator-discovery/cluster_role.yaml +++ b/_docs/kustomize/akash-operator-inventory/cluster-roles.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: inventory-operator + name: operator-inventory labels: akash.network: "true" app.kubernetes.io/name: akash @@ -15,18 +15,29 @@ rules: - nodes - pods - events + - services - persistentvolumes - persistentvolumeclaims verbs: - get - list - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - patch - apiGroups: - '' resources: - pods/exec verbs: - create + - delete + - get + - list + - watch - apiGroups: - storage.k8s.io resources: @@ -62,3 +73,30 @@ rules: - get - list - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-inventory-node + labels: + akash.network: "true" + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator +rules: + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - pods + verbs: + - get + - list + - watch diff --git a/_docs/kustomize/akash-operator-inventory/cluster_role.yaml b/_docs/kustomize/akash-operator-inventory/cluster_role.yaml deleted file mode 100644 index fe972d19..00000000 --- a/_docs/kustomize/akash-operator-inventory/cluster_role.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: inventory-operator - labels: - akash.network: "true" - app.kubernetes.io/name: akash - app.kubernetes.io/instance: inventory - app.kubernetes.io/component: operator -rules: - - apiGroups: - - '' - resources: - - namespaces - - nodes - - pods - - events - - persistentvolumes - - persistentvolumeclaims - verbs: - - get - - list - - watch - - apiGroups: - - '' - resources: - - pods/exec - verbs: - - create - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch - - apiGroups: - - ceph.rook.io - resources: - - cephclusters - - cephblockpools - verbs: - - get - - list - - watch - - apiGroups: - - akash.network - resources: - - inventoryrequests - verbs: - - get - - list - - watch - - apiGroups: - - akash.network - resources: - - inventories - verbs: - - create - - patch - - get - - list - - watch diff --git a/_docs/kustomize/akash-operator-inventory/config.yaml b/_docs/kustomize/akash-operator-inventory/config.yaml new file mode 100644 index 00000000..8e9476e0 --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/config.yaml @@ -0,0 +1,8 @@ +--- +version: v1 +cluster_storage: + - default + - beta2 +exclude: + nodes: [] + node_storage: [] diff --git a/_docs/kustomize/akash-operator-inventory/daemonset.yaml b/_docs/kustomize/akash-operator-inventory/daemonset.yaml new file mode 100644 index 00000000..8f3f8a8e --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/daemonset.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: 
operator-inventory-node + namespace: akash-services + labels: + akash.network: "true" + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator +spec: + selector: + matchLabels: + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator + template: + metadata: + labels: + akash.network: "true" + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator + spec: + serviceAccountName: operator-inventory-node + containers: + - name: inventory-node + image: ghcr.io/akash-network/provider + args: + - "provider-services" + - "operator" + - "inventory" + - "node" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8081 + name: grpc + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: AP_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: AP_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName diff --git a/_docs/kustomize/akash-operator-inventory/deployment.yaml b/_docs/kustomize/akash-operator-inventory/deployment.yaml index 75e9c5e0..dcb08651 100644 --- a/_docs/kustomize/akash-operator-inventory/deployment.yaml +++ b/_docs/kustomize/akash-operator-inventory/deployment.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: inventory-operator + name: operator-inventory namespace: akash-services labels: akash.network: "true" @@ -19,20 +19,23 @@ spec: template: metadata: labels: - app: inventory-operator + app: operator-inventory app.kubernetes.io/name: akash app.kubernetes.io/instance: inventory app.kubernetes.io/component: operator spec: - serviceAccountName: inventory-operator + serviceAccountName: operator-inventory containers: - - name: inventory-operator + - name: operator-inventory image: ghcr.io/akash-network/provider args: - "provider-services" - "operator" - "inventory" imagePullPolicy: IfNotPresent + env: + - name: AP_CONFIG + value: /akash/config.yaml resources: limits: cpu: 500m @@ -44,3 +47,13 @@ spec: - containerPort: 8080 name: api protocol: TCP + - containerPort: 8081 + name: grpc + protocol: TCP + volumeMounts: + - name: config + mountPath: /akash + volumes: + - name: config + configMap: + name: operator-inventory diff --git a/_docs/kustomize/akash-operator-inventory/kustomization.yaml b/_docs/kustomize/akash-operator-inventory/kustomization.yaml index fd79eef6..73238b1e 100644 --- a/_docs/kustomize/akash-operator-inventory/kustomization.yaml +++ b/_docs/kustomize/akash-operator-inventory/kustomization.yaml @@ -2,9 +2,13 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: akash-services resources: - - deployment.yaml + - service-accounts.yaml + - cluster-roles.yaml + - role-bindings.yaml - service.yaml - - rbac.yaml - - service_account.yaml - - cluster_role.yaml - - role-binding.yaml + - daemonset.yaml + - deployment.yaml +configMapGenerator: + - name: operator-inventory + files: + - config.yaml diff --git a/_docs/kustomize/akash-operator-inventory/role-binding.yaml b/_docs/kustomize/akash-operator-inventory/role-binding.yaml deleted file mode 100644 index 49766396..00000000 --- a/_docs/kustomize/akash-operator-inventory/role-binding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: inventory-operator - labels: - akash.network: "true" - app.kubernetes.io/name: 
akash - app.kubernetes.io/instance: inventory - app.kubernetes.io/component: operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: inventory-operator -subjects: - - kind: ServiceAccount - name: inventory-operator - namespace: akash-services diff --git a/_docs/kustomize/akash-operator-inventory/rbac.yaml b/_docs/kustomize/akash-operator-inventory/role-bindings.yaml similarity index 50% rename from _docs/kustomize/akash-operator-inventory/rbac.yaml rename to _docs/kustomize/akash-operator-inventory/role-bindings.yaml index 0ff59351..ea664425 100644 --- a/_docs/kustomize/akash-operator-inventory/rbac.yaml +++ b/_docs/kustomize/akash-operator-inventory/role-bindings.yaml @@ -1,3 +1,40 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-inventory + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-inventory +subjects: + - kind: ServiceAccount + name: operator-inventory + namespace: akash-services +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-inventory-node + labels: + akash.network: "true" + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-inventory-node +subjects: + - kind: ServiceAccount + name: operator-inventory-node + namespace: akash-services +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/_docs/kustomize/akash-operator-inventory/service-accounts.yaml b/_docs/kustomize/akash-operator-inventory/service-accounts.yaml new file mode 100644 index 00000000..936019d2 --- /dev/null +++ b/_docs/kustomize/akash-operator-inventory/service-accounts.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operator-inventory + namespace: akash-services + labels: + akash.network: "true" + app.kubernetes.io/name: akash + app.kubernetes.io/instance: inventory + app.kubernetes.io/component: operator +automountServiceAccountToken: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operator-inventory-node + namespace: akash-services + labels: + akash.network: "true" + app.kubernetes.io/name: operator-inventory-node + app.kubernetes.io/component: inventory + app.kubernetes.io/part-of: operator diff --git a/_docs/kustomize/akash-operator-inventory/service.yaml b/_docs/kustomize/akash-operator-inventory/service.yaml index 97926187..c3bea5bf 100644 --- a/_docs/kustomize/akash-operator-inventory/service.yaml +++ b/_docs/kustomize/akash-operator-inventory/service.yaml @@ -6,7 +6,7 @@ metadata: app.kubernetes.io/name: akash app.kubernetes.io/instance: inventory app.kubernetes.io/component: operator - name: inventory-operator + name: operator-inventory namespace: akash-services spec: type: ClusterIP @@ -15,6 +15,10 @@ spec: port: 8080 targetPort: api appProtocol: http + - name: grpc + port: 8081 + targetPort: grpc + appProtocol: tcp selector: app.kubernetes.io/name: akash app.kubernetes.io/instance: inventory diff --git a/_docs/kustomize/akash-operator-inventory/service_account.yaml b/_docs/kustomize/akash-operator-inventory/service_account.yaml deleted file mode 100644 index 9be2d98b..00000000 --- 
a/_docs/kustomize/akash-operator-inventory/service_account.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: inventory-operator - namespace: akash-services - labels: - akash.network: "true" - app.kubernetes.io/name: akash - app.kubernetes.io/instance: inventory - app.kubernetes.io/component: operator -automountServiceAccountToken: true diff --git a/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml b/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml index 18c55e4a..c36f6a15 100644 --- a/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml +++ b/_docs/kustomize/templates/akash-operator-inventory/kustomization.yaml @@ -9,5 +9,11 @@ patches: target: kind: Deployment group: apps - name: inventory-operator + name: operator-inventory + version: v1 + - path: docker-image.yaml + target: + kind: DaemonSet + group: apps + name: operator-inventory-node version: v1 diff --git a/_run/common-kube.mk b/_run/common-kube.mk index e1a7bd09..6d280df8 100644 --- a/_run/common-kube.mk +++ b/_run/common-kube.mk @@ -156,6 +156,14 @@ kube-setup-ingress-default: kube-status-ingress-%: kubectl rollout status -n akash-services ingress $* --timeout=$(KUBE_ROLLOUT_TIMEOUT)s +.PHONY: kube-deployment-rollout-operator-inventory +kube-deployment-rollout-operator-inventory: + kubectl -n akash-services rollout status deployment operator-inventory --timeout=$(KUBE_ROLLOUT_TIMEOUT)s + kubectl -n akash-services wait pods -l akash.network/component=operator -l akash.network/component=inventory -l app.kubernetes.io/name=akash --for condition=Ready --timeout=$(KUBE_ROLLOUT_TIMEOUT)s + kubectl -n akash-services describe pods -l akash.network/component=operator -l akash.network/component=inventory -l app.kubernetes.io/name=operator-inventory-node + kubectl -n akash-services rollout status daemonset operator-inventory-node --timeout=$(KUBE_ROLLOUT_TIMEOUT)s + kubectl -n akash-services wait pods -l akash.network/component=operator -l akash.network/component=inventory -l app.kubernetes.io/name=operator-inventory-node --for condition=Ready --timeout=$(KUBE_ROLLOUT_TIMEOUT)s + .PHONY: kube-deployment-rollout-% kube-deployment-rollout-%: kubectl -n akash-services rollout status deployment $* --timeout=$(KUBE_ROLLOUT_TIMEOUT)s diff --git a/bidengine/order_test.go b/bidengine/order_test.go index b329f6f0..62c8c00f 100644 --- a/bidengine/order_test.go +++ b/bidengine/order_test.go @@ -6,7 +6,10 @@ import ( "testing" "time" + tpubsub "github.com/troian/pubsub" + "github.com/akash-network/provider/operator/waiter" + "github.com/akash-network/provider/tools/fromctx" sdk "github.com/cosmos/cosmos-sdk/types" @@ -181,7 +184,9 @@ func makeOrderForTest( cfg.Deposit = mtypes.DefaultBidMinDeposit cfg.MaxGroupVolumes = constants.DefaultMaxGroupVolumes - myService, err := NewService(context.Background(), mySession, scaffold.cluster, scaffold.testBus, waiter.NewNullWaiter(), cfg) + ctx := context.Background() + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + myService, err := NewService(ctx, mySession, scaffold.cluster, scaffold.testBus, waiter.NewNullWaiter(), cfg) require.NoError(t, err) require.NotNil(t, myService) diff --git a/bidengine/service.go b/bidengine/service.go index ce188c13..045c2474 100644 --- a/bidengine/service.go +++ b/bidengine/service.go @@ -4,8 +4,10 @@ import ( "context" "errors" + provider "github.com/akash-network/akash-api/go/provider/v1" "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/client_golang/prometheus/promauto" + tpubsub "github.com/troian/pubsub" "github.com/boz/go-lifecycle" @@ -18,6 +20,8 @@ import ( "github.com/akash-network/provider/cluster" "github.com/akash-network/provider/operator/waiter" "github.com/akash-network/provider/session" + "github.com/akash-network/provider/tools/fromctx" + ptypes "github.com/akash-network/provider/types" ) var ( @@ -33,6 +37,7 @@ var ErrNotRunning = errors.New("not running") // StatusClient interface predefined with Status method type StatusClient interface { Status(context.Context) (*Status, error) + StatusV1(ctx context.Context) (*provider.BidEngineStatus, error) } var ( @@ -140,6 +145,15 @@ func (s *service) Status(ctx context.Context) (*Status, error) { } } +func (s *service) StatusV1(ctx context.Context) (*provider.BidEngineStatus, error) { + res, err := s.Status(ctx) + if err != nil { + return nil, err + } + + return &provider.BidEngineStatus{Orders: res.Orders}, nil +} + func (s *service) updateOrderManagerGauge() { orderManagerGauge.Set(float64(len(s.orders))) } @@ -167,6 +181,20 @@ func (s *service) run(ctx context.Context, existingOrders []mtypes.OrderID) { s.orders[key] = order s.updateOrderManagerGauge() } + + bus := fromctx.PubSubFromCtx(ctx) + + signalch := make(chan struct{}, 1) + trySignal := func() { + select { + case signalch <- struct{}{}: + case <-s.lc.ShutdownRequest(): + default: + } + } + + trySignal() + loop: for { select { @@ -196,6 +224,7 @@ loop: ordersCounter.WithLabelValues("start").Inc() s.orders[key] = order + trySignal() } case ch := <-s.statusch: ch <- &Status{ @@ -206,6 +235,9 @@ loop: key := mquery.OrderPath(order.orderID) delete(s.orders, key) ordersCounter.WithLabelValues("stop").Inc() + trySignal() + case <-signalch: + bus.Pub(provider.BidEngineStatus{Orders: uint32(len(s.orders))}, []string{ptypes.PubSubTopicBidengineStatus}, tpubsub.WithRetain()) } s.updateOrderManagerGauge() } diff --git a/cluster/client.go b/cluster/client.go index fa2fe956..679dc3e4 100644 --- a/cluster/client.go +++ b/cluster/client.go @@ -9,9 +9,11 @@ import ( "sync" "time" + inventoryV1 "github.com/akash-network/akash-api/go/inventory/v1" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pkg/errors" eventsv1 "k8s.io/api/events/v1" + "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/tools/remotecommand" @@ -346,6 +348,45 @@ func (inv *inventory) Metrics() ctypes.InventoryMetrics { return ret } +func (inv *inventory) Snapshot() inventoryV1.Cluster { + res := inventoryV1.Cluster{ + Nodes: make(inventoryV1.Nodes, 0, len(inv.nodes)), + Storage: make(inventoryV1.ClusterStorage, 0, len(inv.storage)), + } + + for _, nd := range inv.nodes { + res.Nodes = append(res.Nodes, inventoryV1.Node{ + Name: nd.id, + Resources: inventoryV1.NodeResources{ + CPU: inventoryV1.CPU{ + Quantity: inventoryV1.NewResourcePair(nd.cpu.allocatable.Int64(), nd.cpu.allocated.Int64(), "m"), + }, + Memory: inventoryV1.Memory{ + Quantity: inventoryV1.NewResourcePair(nd.memory.allocatable.Int64(), nd.memory.allocated.Int64(), resource.DecimalSI), + }, + GPU: inventoryV1.GPU{ + Quantity: inventoryV1.NewResourcePair(nd.gpu.allocatable.Int64(), nd.memory.allocated.Int64(), resource.DecimalSI), + }, + EphemeralStorage: inventoryV1.NewResourcePair(nd.ephemeralStorage.allocatable.Int64(), nd.ephemeralStorage.allocated.Int64(), resource.DecimalSI), + VolumesAttached: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + VolumesMounted: 
inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + }, + Capabilities: inventoryV1.NodeCapabilities{}, + }) + } + + for class, storage := range inv.storage { + res.Storage = append(res.Storage, inventoryV1.Storage{ + Quantity: inventoryV1.NewResourcePair(storage.allocatable.Int64(), storage.allocated.Int64(), resource.DecimalSI), + Info: inventoryV1.StorageInfo{ + Class: class, + }, + }) + } + + return res +} + func (inv *inventory) dup() *inventory { res := &inventory{ nodes: make([]*node, 0, len(inv.nodes)), diff --git a/cluster/inventory.go b/cluster/inventory.go index 45108c5b..30f31c50 100644 --- a/cluster/inventory.go +++ b/cluster/inventory.go @@ -8,9 +8,11 @@ import ( "sync/atomic" "time" + provider "github.com/akash-network/akash-api/go/provider/v1" "github.com/boz/go-lifecycle" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + tpubsub "github.com/troian/pubsub" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/tendermint/tendermint/libs/log" @@ -28,6 +30,8 @@ import ( "github.com/akash-network/provider/event" ipoptypes "github.com/akash-network/provider/operator/ipoperator/types" "github.com/akash-network/provider/operator/waiter" + "github.com/akash-network/provider/tools/fromctx" + ptypes "github.com/akash-network/provider/types" ) var ( @@ -62,12 +66,29 @@ var ( }, []string{"quantity"}) ) +type invSnapshotResp struct { + res *provider.Inventory + err error +} + +type inventoryRequest struct { + order mtypes.OrderID + resources dtypes.ResourceGroup + ch chan<- inventoryResponse +} + +type inventoryResponse struct { + value ctypes.Reservation + err error +} + type inventoryService struct { config Config client Client sub pubsub.Subscriber statusch chan chan<- ctypes.InventoryStatus + statusV1ch chan chan<- invSnapshotResp lookupch chan inventoryRequest reservech chan inventoryRequest unreservech chan inventoryRequest @@ -86,16 +107,15 @@ type inventoryService struct { } func newInventoryService( + ctx context.Context, config Config, log log.Logger, - donech <-chan struct{}, sub pubsub.Subscriber, client Client, ipOperatorClient operatorclients.IPOperatorClient, waiter waiter.OperatorWaiter, deployments []ctypes.IDeployment, ) (*inventoryService, error) { - sub, err := sub.Clone() if err != nil { return nil, err @@ -106,6 +126,7 @@ func newInventoryService( client: client, sub: sub, statusch: make(chan chan<- ctypes.InventoryStatus), + statusV1ch: make(chan chan<- invSnapshotResp), lookupch: make(chan inventoryRequest), reservech: make(chan inventoryRequest), unreservech: make(chan inventoryRequest), @@ -122,9 +143,7 @@ func newInventoryService( reservations = append(reservations, newReservation(d.LeaseID().OrderID(), d.ManifestGroup())) } - ctx, _ := TieContextToChannel(context.Background(), donech) - - go is.lc.WatchChannel(donech) + go is.lc.WatchChannel(ctx.Done()) go is.run(ctx, reservations) return is, nil @@ -229,15 +248,25 @@ func (is *inventoryService) status(ctx context.Context) (ctypes.InventoryStatus, } } -type inventoryRequest struct { - order mtypes.OrderID - resources dtypes.ResourceGroup - ch chan<- inventoryResponse -} +func (is *inventoryService) statusV1(ctx context.Context) (*provider.Inventory, error) { + ch := make(chan invSnapshotResp, 1) -type inventoryResponse struct { - value ctypes.Reservation - err error + select { + case <-is.lc.Done(): + return nil, ErrNotRunning + case <-ctx.Done(): + return nil, ctx.Err() + case is.statusV1ch <- ch: + } + + select { + case <-is.lc.Done(): + return 
nil, ErrNotRunning + case <-ctx.Done(): + return nil, ctx.Err() + case result := <-ch: + return result.res, result.err + } } func (is *inventoryService) resourcesToCommit(rgroup dtypes.ResourceGroup) dtypes.ResourceGroup { @@ -391,7 +420,7 @@ func (is *inventoryService) handleRequest(req inventoryRequest, state *inventory { jReservation, _ := json.Marshal(req.resources.GetResourceUnits()) - is.log.Debug("reservation requested", "order", req.order, fmt.Sprintf("resources=%s", jReservation)) + is.log.Debug(fmt.Sprintf("reservation requested. order=%s, resources=%s", req.order, jReservation)) } if reservation.endpointQuantity != 0 { @@ -468,13 +497,23 @@ func (is *inventoryService) run(ctx context.Context, reservationsArg []*reservat } } + bus := fromctx.PubSubFromCtx(ctx) + + signalch := make(chan struct{}, 1) + trySignal := func() { + select { + case signalch <- struct{}{}: + case <-is.lc.ShutdownRequest(): + default: + } + } + loop: for { select { case err := <-is.lc.ShutdownRequest(): is.lc.ShutdownInitiated(err) break loop - case ev := <-is.sub.Events(): switch ev := ev.(type) { // nolint: gocritic case event.ClusterDeployment: @@ -508,10 +547,8 @@ loop: break } } - case req := <-reserveChLocal: is.handleRequest(req, state) - case req := <-is.lookupch: // lookup registration for _, res := range state.reservations { @@ -528,7 +565,6 @@ loop: inventoryRequestsCounter.WithLabelValues("lookup", "not-found").Inc() req.ch <- inventoryResponse{err: errReservationNotFound} - case req := <-is.unreservech: is.log.Debug("unreserving capacity", "order", req.order) // remove reservation @@ -556,18 +592,22 @@ loop: inventoryRequestsCounter.WithLabelValues("unreserve", "not-found").Inc() req.ch <- inventoryResponse{err: errReservationNotFound} - case responseCh := <-is.statusch: responseCh <- is.getStatus(state) inventoryRequestsCounter.WithLabelValues("status", "success").Inc() - + case responseCh := <-is.statusV1ch: + resp, err := is.getStatusV1(state) + responseCh <- invSnapshotResp{ + res: resp, + err: err, + } + inventoryRequestsCounter.WithLabelValues("status", "success").Inc() case <-t.C: // run cluster inventory check t.Stop() // Run an inventory check updateInventory() - case res := <-runch: // inventory check returned runch = nil @@ -632,6 +672,10 @@ loop: } resumeProcessingReservations() + + trySignal() + case <-signalch: + bus.Pub(state.inventory.Snapshot(), []string{ptypes.PubSubTopicInventoryStatus}, tpubsub.WithRetain()) } updateReservationMetrics(state.reservations) @@ -718,6 +762,7 @@ func (is *inventoryService) runCheck(ctx context.Context, state *inventoryServic func (is *inventoryService) getStatus(state *inventoryServiceState) ctypes.InventoryStatus { status := ctypes.InventoryStatus{} + if state.inventory == nil { status.Error = errInventoryNotAvailableYet return status @@ -746,9 +791,43 @@ func (is *inventoryService) getStatus(state *inventoryServiceState) ctypes.Inven for class, size := range state.inventory.Metrics().TotalAvailable.Storage { status.Available.Storage = append(status.Available.Storage, ctypes.InventoryStorageStatus{Class: class, Size: size}) } + return status } +func (is *inventoryService) getStatusV1(state *inventoryServiceState) (*provider.Inventory, error) { + if state.inventory == nil { + return nil, errInventoryNotAvailableYet + } + + status := &provider.Inventory{ + Cluster: state.inventory.Snapshot(), + Reservations: provider.Reservations{ + Pending: provider.ReservationsMetric{ + Count: 0, + Resources: provider.NewResourcesMetric(), + }, + Active: 
provider.ReservationsMetric{ + Count: 0, + Resources: provider.NewResourcesMetric(), + }, + }, + } + + for _, reservation := range state.reservations { + runits := reservation.Resources().GetResourceUnits() + if reservation.allocated { + status.Reservations.Active.Resources.AddResourceUnits(runits) + status.Reservations.Active.Count++ + } else { + status.Reservations.Pending.Resources.AddResourceUnits(runits) + status.Reservations.Pending.Count++ + } + } + + return status, nil +} + func reservationCountEndpoints(reservation *reservation) uint { var externalPortCount uint diff --git a/cluster/inventory_test.go b/cluster/inventory_test.go index e5f95ae3..a3c86a2d 100644 --- a/cluster/inventory_test.go +++ b/cluster/inventory_test.go @@ -8,6 +8,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + tpubsub "github.com/troian/pubsub" manifest "github.com/akash-network/akash-api/go/manifest/v2beta2" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" @@ -23,6 +24,7 @@ import ( "github.com/akash-network/provider/event" ipoptypes "github.com/akash-network/provider/operator/ipoperator/types" "github.com/akash-network/provider/operator/waiter" + "github.com/akash-network/provider/tools/fromctx" ) func newInventory(nodes ...string) ctypes.Inventory { @@ -125,7 +127,6 @@ func TestInventory_ClusterDeploymentNotDeployed(t *testing.T) { InventoryExternalPortQuantity: 1000, } myLog := testutil.Logger(t) - donech := make(chan struct{}) bus := pubsub.NewBus() subscriber, err := bus.Subscribe() require.NoError(t, err) @@ -138,10 +139,13 @@ func TestInventory_ClusterDeploymentNotDeployed(t *testing.T) { clusterClient.On("Inventory", mock.Anything).Return(clusterInv, nil) + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - donech, subscriber, clusterClient, operatorclients.NullIPOperatorClient(), // This client is not used in this test @@ -150,7 +154,7 @@ func TestInventory_ClusterDeploymentNotDeployed(t *testing.T) { require.NoError(t, err) require.NotNil(t, inv) - close(donech) + cancel() <-inv.lc.Done() // No ports used yet @@ -165,7 +169,6 @@ func TestInventory_ClusterDeploymentDeployed(t *testing.T) { InventoryExternalPortQuantity: 1000, } myLog := testutil.Logger(t) - donech := make(chan struct{}) bus := pubsub.NewBus() subscriber, err := bus.Subscribe() require.NoError(t, err) @@ -227,10 +230,13 @@ func TestInventory_ClusterDeploymentDeployed(t *testing.T) { inventoryCalled <- 0 // Value does not matter }).Return(clusterInv, nil) + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - donech, subscriber, clusterClient, nil, // No IP operator client @@ -289,7 +295,7 @@ func TestInventory_ClusterDeploymentDeployed(t *testing.T) { require.Equal(t, uint(1000), inv.availableExternalPorts) // Shut everything down - close(donech) + cancel() <-inv.lc.Done() } @@ -445,10 +451,14 @@ func TestInventory_ReserveIPNoIPOperator(t *testing.T) { subscriber, err := scaffold.bus.Subscribe() require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - scaffold.donech, subscriber, 
scaffold.clusterClient, nil, // No IP operator client @@ -463,6 +473,7 @@ func TestInventory_ReserveIPNoIPOperator(t *testing.T) { require.Nil(t, reservation) // Shut everything down + cancel() close(scaffold.donech) <-inv.lc.Done() } @@ -490,10 +501,13 @@ func TestInventory_ReserveIPUnavailableWithIPOperator(t *testing.T) { }, nil) mockIP.On("Stop") + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - scaffold.donech, subscriber, scaffold.clusterClient, mockIP, @@ -508,6 +522,7 @@ func TestInventory_ReserveIPUnavailableWithIPOperator(t *testing.T) { require.Nil(t, reservation) // Shut everything down + cancel() close(scaffold.donech) <-inv.lc.Done() } @@ -553,10 +568,13 @@ func TestInventory_ReserveIPAvailableWithIPOperator(t *testing.T) { mockIP.On("Stop") + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - scaffold.donech, subscriber, scaffold.clusterClient, mockIP, @@ -592,6 +610,7 @@ func TestInventory_ReserveIPAvailableWithIPOperator(t *testing.T) { require.NotNil(t, reservation) // Shut everything down + cancel() close(scaffold.donech) <-inv.lc.Done() @@ -657,10 +676,13 @@ func TestInventory_OverReservations(t *testing.T) { InventoryExternalPortQuantity: 1000, } + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, fromctx.CtxKeyPubSub, tpubsub.New(ctx, 1000)) + inv, err := newInventoryService( + ctx, config, myLog, - scaffold.donech, subscriber, scaffold.clusterClient, nil, // No IP operator client @@ -703,7 +725,8 @@ func TestInventory_OverReservations(t *testing.T) { // Wait for second call to inventory testutil.ChannelWaitForValueUpTo(t, scaffold.inventoryCalled, 30*time.Second) - // // Shut everything down + // Shut everything down + cancel() close(scaffold.donech) <-inv.lc.Done() diff --git a/cluster/kube/client_exec.go b/cluster/kube/client_exec.go index 149ce053..89f506a2 100644 --- a/cluster/kube/client_exec.go +++ b/cluster/kube/client_exec.go @@ -2,6 +2,7 @@ package kube import ( "context" + "errors" "fmt" "io" "sort" @@ -179,8 +180,9 @@ loop: // Check to see if the process ran & returned an exit code // If this is true, don't return an error. 
Something ran in the // container which is what this code was trying to do - if err, ok := err.(executil.CodeExitError); ok { - return execResult{exitCode: err.Code}, nil + terr := executil.CodeExitError{} + if errors.As(err, &terr) { + return execResult{exitCode: terr.Code}, nil } // Some errors are untyped, use string matching to give better answers diff --git a/cluster/kube/clientcommon/open_kube_config.go b/cluster/kube/clientcommon/open_kube_config.go index f8eb24a9..093c94a0 100644 --- a/cluster/kube/clientcommon/open_kube_config.go +++ b/cluster/kube/clientcommon/open_kube_config.go @@ -12,7 +12,7 @@ import ( func OpenKubeConfig(cfgPath string, log log.Logger) (*rest.Config, error) { // Always bypass the default rate limiting - rateLimiter := flowcontrol.NewFakeAlwaysRateLimiter() + rateLimiter := flowcontrol.NewTokenBucketRateLimiter(1000, 3000) // if cfgPath contains value it is either set to default value $HOME/.kube/config // or explicitly by env/flag AP_KUBECONFIG/--kubeconfig @@ -36,6 +36,7 @@ func OpenKubeConfig(cfgPath string, log log.Logger) (*rest.Config, error) { return cfg, fmt.Errorf("%w: error building kubernetes config", err) } cfg.RateLimiter = rateLimiter + // cfg.Timeout return cfg, err } diff --git a/cluster/kube/invendory_node.go b/cluster/kube/invendory_node.go index e7d0b43d..8c542fd5 100644 --- a/cluster/kube/invendory_node.go +++ b/cluster/kube/invendory_node.go @@ -1,185 +1,189 @@ package kube -import ( - "fmt" - - types "github.com/akash-network/akash-api/go/node/types/v1beta3" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - - "github.com/akash-network/provider/cluster/kube/builder" - ctypes "github.com/akash-network/provider/cluster/types/v1beta3" - crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" -) - -const ( - runtimeClassNvidia = "nvidia" -) - -type node struct { - id string - cpu resourcePair - gpu resourcePair - memory resourcePair - ephemeralStorage resourcePair - volumesAttached resourcePair - volumesMounted resourcePair - capabilities *crd.NodeInfoCapabilities -} - -func newNode(nodeStatus *corev1.NodeStatus, capabilities *crd.NodeInfoCapabilities) *node { - mzero := resource.NewMilliQuantity(0, resource.DecimalSI) - zero := resource.NewQuantity(0, resource.DecimalSI) - - gpu := *resource.NewQuantity(0, resource.DecimalSI) - if capabilities != nil { - var resourceName corev1.ResourceName - switch capabilities.GPU.Vendor { - case builder.GPUVendorNvidia: - resourceName = builder.ResourceGPUNvidia - case builder.GPUVendorAMD: - resourceName = builder.ResourceGPUAMD - } - - gpu = nodeStatus.Allocatable.Name(resourceName, resource.DecimalSI).DeepCopy() - } - - nd := &node{ - cpu: newResourcePair(nodeStatus.Allocatable.Cpu().DeepCopy(), mzero.DeepCopy()), - gpu: newResourcePair(gpu, zero.DeepCopy()), - memory: newResourcePair(nodeStatus.Allocatable.Memory().DeepCopy(), zero.DeepCopy()), - ephemeralStorage: newResourcePair(nodeStatus.Allocatable.StorageEphemeral().DeepCopy(), zero.DeepCopy()), - volumesAttached: newResourcePair(*resource.NewQuantity(int64(len(nodeStatus.VolumesAttached)), resource.DecimalSI), zero.DeepCopy()), - capabilities: capabilities, - } - - return nd -} - -func (nd *node) addAllocatedResources(rl corev1.ResourceList) { - for name, quantity := range rl { - switch name { - case corev1.ResourceCPU: - nd.cpu.allocated.Add(quantity) - case corev1.ResourceMemory: - nd.memory.allocated.Add(quantity) - case corev1.ResourceEphemeralStorage: - nd.ephemeralStorage.allocated.Add(quantity) - case 
builder.ResourceGPUNvidia: - fallthrough - case builder.ResourceGPUAMD: - nd.gpu.allocated.Add(quantity) - } - } -} - -func (nd *node) dup() *node { - res := &node{ - id: nd.id, - cpu: *nd.cpu.dup(), - gpu: *nd.gpu.dup(), - memory: *nd.memory.dup(), - ephemeralStorage: *nd.ephemeralStorage.dup(), - volumesAttached: *nd.volumesAttached.dup(), - volumesMounted: *nd.volumesMounted.dup(), - capabilities: nd.capabilities.DeepCopy(), - } - - return res -} - -func (nd *node) tryAdjustCPU(res *types.CPU) bool { - return nd.cpu.subMilliNLZ(res.Units) -} - -func (nd *node) tryAdjustGPU(res *types.GPU, sparams *crd.SchedulerParams) bool { - if res.Units.Value() == 0 { - return true - } - - // GPUs cannot be reserved until node capabilities available - if nd.capabilities == nil { - return false - } - - attrs, err := ctypes.ParseGPUAttributes(res.Attributes) - if err != nil { - return false - } - - models, match := attrs[nd.capabilities.GPU.Vendor] - if !match { - return false - } - - var model string - for _, m := range models { - if m == nd.capabilities.GPU.Model || m == "*" { - model = nd.capabilities.GPU.Model - break - } - } - - if model == "" { - return false - } - - if !nd.gpu.subNLZ(res.Units) { - return false - } - - sParamsEnsureGPU(sparams) - sparams.Resources.GPU.Vendor = nd.capabilities.GPU.Vendor - sparams.Resources.GPU.Model = model - - switch nd.capabilities.GPU.Vendor { - case builder.GPUVendorNvidia: - sparams.RuntimeClass = runtimeClassNvidia - default: - } - - res.Attributes = types.Attributes{ - { - Key: fmt.Sprintf("vendor/%s/model/%s", nd.capabilities.GPU.Vendor, model), - Value: "true", - }, - } - - return true -} - -func sParamsEnsureGPU(sparams *crd.SchedulerParams) { - sParamsEnsureResources(sparams) - - if sparams.Resources.GPU == nil { - sparams.Resources.GPU = &crd.SchedulerResourceGPU{} - } -} - -func sParamsEnsureResources(sparams *crd.SchedulerParams) { - if sparams.Resources == nil { - sparams.Resources = &crd.SchedulerResources{} - } -} - -func (nd *node) tryAdjustMemory(res *types.Memory) bool { - return nd.memory.subNLZ(res.Quantity) -} - -func (nd *node) tryAdjustEphemeralStorage(res *types.Storage) bool { - return nd.ephemeralStorage.subNLZ(res.Quantity) -} - -// nolint: unused -func (nd *node) tryAdjustVolumesAttached(res types.ResourceValue) bool { - return nd.volumesAttached.subNLZ(res) -} - -func (cn clusterNodes) dup() clusterNodes { - ret := make(clusterNodes) - - for name, nd := range cn { - ret[name] = nd.dup() - } - return ret -} +// import ( +// +// "fmt" +// +// types "github.com/akash-network/akash-api/go/node/types/v1beta3" +// corev1 "k8s.io/api/core/v1" +// "k8s.io/apimachinery/pkg/api/resource" +// +// "github.com/akash-network/provider/cluster/kube/builder" +// ctypes "github.com/akash-network/provider/cluster/types/v1beta3" +// crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" +// +// ) +// +// const ( +// +// runtimeClassNvidia = "nvidia" +// +// ) +// +// type node struct { +// id string +// cpu resourcePair +// gpu resourcePair +// memory resourcePair +// ephemeralStorage resourcePair +// volumesAttached resourcePair +// volumesMounted resourcePair +// capabilities *crd.NodeInfoCapabilities +// } +// +// func newNode(nodeStatus *corev1.NodeStatus, capabilities *crd.NodeInfoCapabilities) *node { +// mzero := resource.NewMilliQuantity(0, resource.DecimalSI) +// zero := resource.NewQuantity(0, resource.DecimalSI) +// +// gpu := *resource.NewQuantity(0, resource.DecimalSI) +// if capabilities != nil { +// var resourceName 
corev1.ResourceName +// switch capabilities.GPU.Vendor { +// case builder.GPUVendorNvidia: +// resourceName = builder.ResourceGPUNvidia +// case builder.GPUVendorAMD: +// resourceName = builder.ResourceGPUAMD +// } +// +// gpu = nodeStatus.Allocatable.Name(resourceName, resource.DecimalSI).DeepCopy() +// } +// +// nd := &node{ +// cpu: newResourcePair(nodeStatus.Allocatable.Cpu().DeepCopy(), mzero.DeepCopy()), +// gpu: newResourcePair(gpu, zero.DeepCopy()), +// memory: newResourcePair(nodeStatus.Allocatable.Memory().DeepCopy(), zero.DeepCopy()), +// ephemeralStorage: newResourcePair(nodeStatus.Allocatable.StorageEphemeral().DeepCopy(), zero.DeepCopy()), +// volumesAttached: newResourcePair(*resource.NewQuantity(int64(len(nodeStatus.VolumesAttached)), resource.DecimalSI), zero.DeepCopy()), +// capabilities: capabilities, +// } +// +// return nd +// } +// +// func (nd *node) addAllocatedResources(rl corev1.ResourceList) { +// for name, quantity := range rl { +// switch name { +// case corev1.ResourceCPU: +// nd.cpu.allocated.Add(quantity) +// case corev1.ResourceMemory: +// nd.memory.allocated.Add(quantity) +// case corev1.ResourceEphemeralStorage: +// nd.ephemeralStorage.allocated.Add(quantity) +// case builder.ResourceGPUNvidia: +// fallthrough +// case builder.ResourceGPUAMD: +// nd.gpu.allocated.Add(quantity) +// } +// } +// } +// +// func (nd *node) dup() *node { +// res := &node{ +// id: nd.id, +// cpu: *nd.cpu.dup(), +// gpu: *nd.gpu.dup(), +// memory: *nd.memory.dup(), +// ephemeralStorage: *nd.ephemeralStorage.dup(), +// volumesAttached: *nd.volumesAttached.dup(), +// volumesMounted: *nd.volumesMounted.dup(), +// capabilities: nd.capabilities.DeepCopy(), +// } +// +// return res +// } +// +// func (nd *node) tryAdjustCPU(res *types.CPU) bool { +// return nd.cpu.subMilliNLZ(res.Units) +// } +// +// func (nd *node) tryAdjustGPU(res *types.GPU, sparams *crd.SchedulerParams) bool { +// if res.Units.Value() == 0 { +// return true +// } +// +// // GPUs cannot be reserved until node capabilities available +// if nd.capabilities == nil { +// return false +// } +// +// attrs, err := ctypes.ParseGPUAttributes(res.Attributes) +// if err != nil { +// return false +// } +// +// models, match := attrs[nd.capabilities.GPU.Vendor] +// if !match { +// return false +// } +// +// var model string +// for _, m := range models { +// if m == nd.capabilities.GPU.Model || m == "*" { +// model = nd.capabilities.GPU.Model +// break +// } +// } +// +// if model == "" { +// return false +// } +// +// if !nd.gpu.subNLZ(res.Units) { +// return false +// } +// +// sParamsEnsureGPU(sparams) +// sparams.Resources.GPU.Vendor = nd.capabilities.GPU.Vendor +// sparams.Resources.GPU.Model = model +// +// switch nd.capabilities.GPU.Vendor { +// case builder.GPUVendorNvidia: +// sparams.RuntimeClass = runtimeClassNvidia +// default: +// } +// +// res.Attributes = types.Attributes{ +// { +// Key: fmt.Sprintf("vendor/%s/model/%s", nd.capabilities.GPU.Vendor, model), +// Value: "true", +// }, +// } +// +// return true +// } +// func sParamsEnsureGPU(sparams *crd.SchedulerParams) { +// sParamsEnsureResources(sparams) +// +// if sparams.Resources.GPU == nil { +// sparams.Resources.GPU = &crd.SchedulerResourceGPU{} +// } +// } +// +// func sParamsEnsureResources(sparams *crd.SchedulerParams) { +// if sparams.Resources == nil { +// sparams.Resources = &crd.SchedulerResources{} +// } +// } + +// +// func (nd *node) tryAdjustMemory(res *types.Memory) bool { +// return nd.memory.subNLZ(res.Quantity) +// } +// +// func (nd *node) 
tryAdjustEphemeralStorage(res *types.Storage) bool { +// return nd.ephemeralStorage.subNLZ(res.Quantity) +// } +// +// // nolint: unused +// func (nd *node) tryAdjustVolumesAttached(res types.ResourceValue) bool { +// return nd.volumesAttached.subNLZ(res) +// } + +// func (cn clusterNodes) dup() clusterNodes { +// ret := make(clusterNodes) +// +// for name, nd := range cn { +// ret[name] = nd.dup() +// } +// return ret +// } diff --git a/cluster/kube/inventory.go b/cluster/kube/inventory.go index 5833d3e6..51a86086 100644 --- a/cluster/kube/inventory.go +++ b/cluster/kube/inventory.go @@ -3,18 +3,16 @@ package kube import ( "context" "encoding/json" + "errors" "fmt" "reflect" "strings" "time" + inventoryV1 "github.com/akash-network/akash-api/go/inventory/v1" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/pager" - "github.com/tendermint/tendermint/libs/log" types "github.com/akash-network/akash-api/go/node/types/v1beta3" @@ -25,24 +23,28 @@ import ( ) const ( - inventoryOperatorQueryTimeout = 5 * time.Second + inventoryOperatorQueryTimeout = 10 * time.Second ) -type clusterNodes map[string]*node +const ( + runtimeClassNvidia = "nvidia" +) + +var ( + ErrInventoryNotAvailable = errors.New("kube: inventory service not available") +) type inventory struct { - storageClasses clusterStorage - nodes clusterNodes - log log.Logger + inventoryV1.Cluster + log log.Logger } var _ ctypes.Inventory = (*inventory)(nil) -func newInventory(log log.Logger, storage clusterStorage, nodes map[string]*node) *inventory { +func newInventory(log log.Logger, clState inventoryV1.Cluster) *inventory { inv := &inventory{ - storageClasses: storage, - nodes: nodes, - log: log, + Cluster: clState, + log: log, } return inv @@ -50,9 +52,8 @@ func newInventory(log log.Logger, storage clusterStorage, nodes map[string]*node func (inv *inventory) dup() inventory { dup := inventory{ - storageClasses: inv.storageClasses.dup(), - nodes: inv.nodes.dup(), - log: inv.log, + Cluster: *inv.Cluster.Dup(), + log: inv.log, } return dup @@ -61,23 +62,23 @@ func (inv *inventory) dup() inventory { // tryAdjust cluster inventory // It returns two boolean values. 
First indicates if node-wide resources satisfy (true) requirements
 // Second indicates if cluster-wide resources satisfy (true) requirements
-func (inv *inventory) tryAdjust(node string, res *types.Resources) (*crd.SchedulerParams, bool, bool) {
-	nd := inv.nodes[node].dup()
+func (inv *inventory) tryAdjust(node int, res *types.Resources) (*crd.SchedulerParams, bool, bool) {
+	nd := inv.Nodes[node].Dup()
 	sparams := &crd.SchedulerParams{}

-	if !nd.tryAdjustCPU(res.CPU) {
+	if !tryAdjustCPU(&nd.Resources.CPU.Quantity, res.CPU) {
 		return nil, false, true
 	}

-	if !nd.tryAdjustGPU(res.GPU, sparams) {
+	if !tryAdjustGPU(&nd.Resources.GPU, res.GPU, sparams) {
 		return nil, false, true
 	}

-	if !nd.tryAdjustMemory(res.Memory) {
+	if !tryAdjustMemory(&nd.Resources.Memory.Quantity, res.Memory) {
 		return nil, false, true
 	}

-	storageClasses := inv.storageClasses.dup()
+	storageClasses := inv.Storage.Dup()

 	for i, storage := range res.Storage {
 		attrs, err := ctypes.ParseStorageAttributes(storage.Attributes)
@@ -86,32 +87,45 @@
 		}

 		if !attrs.Persistent {
-			if !nd.tryAdjustEphemeralStorage(&res.Storage[i]) {
+			if !tryAdjustEphemeralStorage(&nd.Resources.EphemeralStorage, &res.Storage[i]) {
 				return nil, false, true
 			}
 			continue
 		}

-		if !nd.capabilities.Storage.HasClass(attrs.Class) {
+		if !nd.IsStorageClassSupported(attrs.Class) {
 			return nil, false, true
 		}

 		// if !nd.tryAdjustVolumesAttached(types.NewResourceValue(1)) {
 		//	return nil, false, true
+		// }

-		// no need to check if storageClass map has class present as it has been validated
-		// for particular node during inventory fetch
-		if !storageClasses[attrs.Class].subNLZ(storage.Quantity) {
-			// cluster storage does not have enough space thus break to error
+		storageAdjusted := false
+
+		for idx := range storageClasses {
+			if storageClasses[idx].Info.Class == attrs.Class {
+				if !storageClasses[idx].Quantity.SubNLZ(storage.Quantity) {
+					// cluster storage does not have enough space thus break to error
+					return nil, false, false
+				}
+				storageAdjusted = true
+				break
+			}
+		}
+
+		// requested storage class is not present in the cluster
+		// there is no point to adjust inventory further
+		if !storageAdjusted {
 			return nil, false, false
 		}
 	}

 	// all requirements for current group have been satisfied
 	// commit and move on
-	inv.nodes[node] = nd
-	inv.storageClasses = storageClasses
+	inv.Nodes[node] = nd
+	inv.Storage = storageClasses

 	if reflect.DeepEqual(sparams, &crd.SchedulerParams{}) {
 		return nil, true, true
@@ -120,6 +134,103 @@
 	return sparams, true, true
 }

+func tryAdjustCPU(rp *inventoryV1.ResourcePair, res *types.CPU) bool {
+	return rp.SubMilliNLZ(res.Units)
+}
+
+func tryAdjustGPU(rp *inventoryV1.GPU, res *types.GPU, sparams *crd.SchedulerParams) bool {
+	reqCnt := res.Units.Value()
+
+	if reqCnt == 0 {
+		return true
+	}
+
+	if rp.Quantity.Available().Value() == 0 {
+		return false
+	}
+
+	attrs, err := ctypes.ParseGPUAttributes(res.Attributes)
+	if err != nil {
+		return false
+	}
+
+	for _, info := range rp.Info {
+		models, exists := attrs[info.Vendor]
+		if !exists {
+			continue
+		}
+
+		attr, exists := models.ExistsOrWildcard(info.Name)
+		if !exists {
+			continue
+		}
+
+		if attr != nil {
+			if attr.RAM != "" && attr.RAM != info.MemorySize {
+				continue
+			}
+
+			if attr.Interface != "" && attr.Interface != info.Interface {
+				continue
+			}
+		}
+
+		reqCnt--
+		if reqCnt == 0 {
+			vendor := strings.ToLower(info.Vendor)
+
+			if
+func tryAdjustGPU(rp *inventoryV1.GPU, res *types.GPU, sparams *crd.SchedulerParams) bool {
+	reqCnt := res.Units.Value()
+
+	if reqCnt == 0 {
+		return true
+	}
+
+	if rp.Quantity.Available().Value() == 0 {
+		return false
+	}
+
+	attrs, err := ctypes.ParseGPUAttributes(res.Attributes)
+	if err != nil {
+		return false
+	}
+
+	for _, info := range rp.Info {
+		models, exists := attrs[info.Vendor]
+		if !exists {
+			continue
+		}
+
+		attr, exists := models.ExistsOrWildcard(info.Name)
+		if !exists {
+			continue
+		}
+
+		if attr != nil {
+			if attr.RAM != "" && attr.RAM != info.MemorySize {
+				continue
+			}
+
+			if attr.Interface != "" && attr.Interface != info.Interface {
+				continue
+			}
+		}
+
+		reqCnt--
+		if reqCnt == 0 {
+			vendor := strings.ToLower(info.Vendor)
+
+			if !rp.Quantity.SubNLZ(res.Units) {
+				return false
+			}
+
+			sParamsEnsureGPU(sparams)
+			sparams.Resources.GPU.Vendor = vendor
+			sparams.Resources.GPU.Model = info.Name
+
+			switch vendor {
+			case builder.GPUVendorNvidia:
+				sparams.RuntimeClass = runtimeClassNvidia
+			default:
+			}
+
+			key := fmt.Sprintf("vendor/%s/model/%s", vendor, info.Name)
+			if attr != nil {
+				if attr.RAM != "" {
+					key = fmt.Sprintf("%s/ram/%s", key, attr.RAM)
+				}
+
+				if attr.Interface != "" {
+					key = fmt.Sprintf("%s/interface/%s", key, attr.Interface)
+				}
+			}
+
+			res.Attributes = types.Attributes{
+				{
+					Key:   key,
+					Value: "true",
+				},
+			}
+
+			return true
+		}
+	}
+
+	return false
+}
+
+func tryAdjustMemory(rp *inventoryV1.ResourcePair, res *types.Memory) bool {
+	return rp.SubNLZ(res.Quantity)
+}
+
+func tryAdjustEphemeralStorage(rp *inventoryV1.ResourcePair, res *types.Storage) bool {
+	return rp.SubNLZ(res.Quantity)
+}
+
+// nolint: unused
+func tryAdjustVolumesAttached(rp *inventoryV1.ResourcePair, res types.ResourceValue) bool {
+	return rp.SubNLZ(res)
+}
+
 func (inv *inventory) Adjust(reservation ctypes.ReservationGroup, opts ...ctypes.InventoryOption) error {
 	cfg := &ctypes.InventoryOptions{}
 	for _, opt := range opts {
@@ -149,7 +260,7 @@ func (inv *inventory) Adjust(reservation ctypes.ReservationGroup, opts ...ctypes
 	var err error
 
 nodes:
-	for nodeName := range currInventory.nodes {
+	for nodeIdx := range currInventory.Nodes {
 		for i := len(resources) - 1; i >= 0; i-- {
 			adjustedGroup := false
 
@@ -163,7 +274,7 @@ nodes:
 			}
 
 			for ; resources[i].Count > 0; resources[i].Count-- {
-				sparams, nStatus, cStatus := currInventory.tryAdjust(nodeName, adjusted)
+				sparams, nStatus, cStatus := currInventory.tryAdjust(nodeIdx, adjusted)
 				if !cStatus {
 					// cannot satisfy cluster-wide resources, stop lookup
 					break nodes
@@ -234,6 +345,10 @@ nodes:
 	return ctypes.ErrInsufficientCapacity
 }
 
+func (inv *inventory) Snapshot() inventoryV1.Cluster {
+	return *inv.Cluster.Dup()
+}
+
 func (inv *inventory) Metrics() ctypes.InventoryMetrics {
 	cpuTotal := uint64(0)
 	gpuTotal := uint64(0)
@@ -248,50 +363,50 @@ func (inv *inventory) Metrics() ctypes.InventoryMetrics {
 	storageAvailable := make(map[string]int64)
 
 	ret := ctypes.InventoryMetrics{
-		Nodes: make([]ctypes.InventoryNode, 0, len(inv.nodes)),
+		Nodes: make([]ctypes.InventoryNode, 0, len(inv.Nodes)),
 	}
 
-	for nodeName, nd := range inv.nodes {
+	for _, nd := range inv.Nodes {
 		invNode := ctypes.InventoryNode{
-			Name: nodeName,
+			Name: nd.Name,
 			Allocatable: ctypes.InventoryNodeMetric{
-				CPU:              uint64(nd.cpu.allocatable.MilliValue()),
-				GPU:              uint64(nd.gpu.allocatable.Value()),
-				Memory:           uint64(nd.memory.allocatable.Value()),
-				StorageEphemeral: uint64(nd.ephemeralStorage.allocatable.Value()),
+				CPU:              uint64(nd.Resources.CPU.Quantity.Allocatable.MilliValue()),
+				GPU:              uint64(nd.Resources.GPU.Quantity.Allocatable.Value()),
+				Memory:           uint64(nd.Resources.Memory.Quantity.Allocatable.Value()),
+				StorageEphemeral: uint64(nd.Resources.EphemeralStorage.Allocatable.Value()),
 			},
 		}
 
-		cpuTotal += uint64(nd.cpu.allocatable.MilliValue())
-		gpuTotal += uint64(nd.gpu.allocatable.Value())
-		memoryTotal += uint64(nd.memory.allocatable.Value())
-		storageEphemeralTotal += uint64(nd.ephemeralStorage.allocatable.Value())
+		cpuTotal += uint64(nd.Resources.CPU.Quantity.Allocatable.MilliValue())
+		gpuTotal += uint64(nd.Resources.GPU.Quantity.Allocatable.Value())
+		memoryTotal += uint64(nd.Resources.Memory.Quantity.Allocatable.Value())
+		storageEphemeralTotal += uint64(nd.Resources.EphemeralStorage.Allocatable.Value())
 
-		avail := nd.cpu.available()
+		avail := nd.Resources.CPU.Quantity.Available()
 		invNode.Available.CPU = uint64(avail.MilliValue())
 		cpuAvailable += invNode.Available.CPU
 
-		avail = nd.gpu.available()
+		avail = nd.Resources.GPU.Quantity.Available()
 		invNode.Available.GPU = uint64(avail.Value())
 		gpuAvailable += invNode.Available.GPU
 
-		avail = nd.memory.available()
+		avail = nd.Resources.Memory.Quantity.Available()
 		invNode.Available.Memory = uint64(avail.Value())
 		memoryAvailable += invNode.Available.Memory
 
-		avail = nd.ephemeralStorage.available()
+		avail = nd.Resources.EphemeralStorage.Available()
 		invNode.Available.StorageEphemeral = uint64(avail.Value())
 		storageEphemeralAvailable += invNode.Available.StorageEphemeral
 
 		ret.Nodes = append(ret.Nodes, invNode)
 	}
 
-	for class, storage := range inv.storageClasses {
-		tmp := storage.allocatable.DeepCopy()
-		storageTotal[class] = tmp.Value()
+	for _, class := range inv.Storage {
+		tmp := class.Quantity.Allocatable.DeepCopy()
+		storageTotal[class.Info.Class] = tmp.Value()
 
-		tmp = storage.available()
-		storageAvailable[class] = tmp.Value()
+		tmp = *class.Quantity.Available()
+		storageAvailable[class.Info.Class] = tmp.Value()
 	}
 
 	ret.TotalAllocatable = ctypes.InventoryMetricTotal{
@@ -314,30 +429,12 @@ func (inv *inventory) Metrics() ctypes.InventoryMetrics {
 }
 
 func (c *client) Inventory(ctx context.Context) (ctypes.Inventory, error) {
-	cstorage, err := c.fetchStorage(ctx)
-	if err != nil {
-		// log inventory operator error but keep going to fetch nodes
-		// as provider still may make bids on orders without persistent storage
-		c.log.Error("checking storage inventory", "error", err.Error())
-	}
-
-	knodes, err := c.fetchActiveNodes(ctx, cstorage)
-	if err != nil {
-		return nil, err
-	}
-
-	return newInventory(c.log.With("kube", "inventory"), cstorage, knodes), nil
-}
-
-func (c *client) fetchStorage(ctx context.Context) (clusterStorage, error) {
 	ctx, cancel := context.WithTimeout(ctx, inventoryOperatorQueryTimeout)
 	defer cancel()
 
-	cstorage := make(clusterStorage)
-
-	// discover inventory operator
-	// empty namespace mean search through all namespaces
-	svcResult, err := c.kc.CoreV1().Services(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
+	svcResult, err := c.kc.CoreV1().Services("akash-services").List(ctx, metav1.ListOptions{
 		LabelSelector: builder.AkashManagedLabelName + "=true" +
 			",app.kubernetes.io/name=akash" +
 			",app.kubernetes.io/instance=inventory" +
@@ -348,7 +445,7 @@ func (c *client) fetchStorage(ctx context.Context) (clusterStorage, error) {
 	}
 
 	if len(svcResult.Items) == 0 {
-		return nil, nil
+		return nil, ErrInventoryNotAvailable
 	}
 
 	result := c.kc.CoreV1().RESTClient().Get().
@@ -356,203 +453,38 @@ func (c *client) fetchStorage(ctx context.Context) (clusterStorage, error) {
 		Resource("services").
 		Name(svcResult.Items[0].Name + ":api").
 		SubResource("proxy").
-		Suffix("inventory").
+		Suffix("v1/inventory").
 		Do(ctx)
 
 	if err := result.Error(); err != nil {
 		return nil, err
 	}
 
-	inv := &crd.Inventory{}
-
-	if err := result.Into(inv); err != nil {
-		return nil, err
-	}
-
-	statusPairs := make([]interface{}, 0, len(inv.Status.Messages))
-	for idx, msg := range inv.Status.Messages {
-		statusPairs = append(statusPairs, fmt.Sprintf("msg%d", idx))
-		statusPairs = append(statusPairs, msg)
-	}
-
-	if len(statusPairs) > 0 {
-		c.log.Info("inventory request performed with warnings", statusPairs...)
- } - - for _, storage := range inv.Spec.Storage { - if !isSupportedStorageClass(storage.Class) { - continue - } - - cstorage[storage.Class] = rpNewFromAkash(storage.ResourcePair) - } - - return cstorage, nil -} - -// todo write unmarshaler -func parseNodeCapabilities(labels map[string]string, cStorage clusterStorage) *crd.NodeInfoCapabilities { - capabilities := &crd.NodeInfoCapabilities{} - - for k := range labels { - tokens := strings.Split(k, "/") - if len(tokens) != 2 && tokens[0] != builder.AkashManagedLabelName { - continue - } - - tokens = strings.Split(tokens[1], ".") - if len(tokens) < 2 || tokens[0] != "capabilities" { - continue - } - - tokens = tokens[1:] - switch tokens[0] { - case "gpu": - if len(tokens) < 2 { - continue - } - - tokens = tokens[1:] - if tokens[0] == "vendor" { - capabilities.GPU.Vendor = tokens[1] - if tokens[2] == "model" { - capabilities.GPU.Model = tokens[3] - } - } - case "storage": - if len(tokens) < 2 { - continue - } - - switch tokens[1] { - case "class": - capabilities.Storage.Classes = append(capabilities.Storage.Classes, tokens[2]) - default: - } - } - } - - // parse storage classes with legacy mode if new mode is not detected - if len(capabilities.Storage.Classes) == 0 { - if value, defined := labels[builder.AkashNetworkStorageClasses]; defined { - for _, class := range strings.Split(value, ".") { - if _, avail := cStorage[class]; avail { - capabilities.Storage.Classes = append(capabilities.Storage.Classes, class) - } - } - } - } - - return capabilities -} - -func (c *client) fetchActiveNodes(ctx context.Context, cstorage clusterStorage) (map[string]*node, error) { - // todo filter nodes by akash.network label - knodes, err := wrapKubeCall("nodes-list", func() (*corev1.NodeList, error) { - return c.kc.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - }) + data, err := result.Raw() if err != nil { return nil, err } - podListOptions := metav1.ListOptions{ - FieldSelector: "status.phase==Running", - } - podsClient := c.kc.CoreV1().Pods(metav1.NamespaceAll) - podsPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { - return podsClient.List(ctx, opts) - }) - - retnodes := make(map[string]*node) - for _, knode := range knodes.Items { - if !c.nodeIsActive(knode) { - continue - } - - capabilities := parseNodeCapabilities(knode.Labels, cstorage) - - retnodes[knode.Name] = newNode(&knode.Status, capabilities) - } - - // Go over each pod and sum the resources for it into the value for the pod it lives on - err = podsPager.EachListItem(ctx, podListOptions, func(obj runtime.Object) error { - pod := obj.(*corev1.Pod) - nodeName := pod.Spec.NodeName - - entry, validNode := retnodes[nodeName] - if !validNode { - return nil - } - - for _, container := range pod.Spec.Containers { - entry.addAllocatedResources(container.Resources.Requests) - } - - // Add overhead for running a pod to the sum of requests - // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ - entry.addAllocatedResources(pod.Spec.Overhead) - - retnodes[nodeName] = entry // Map is by value, so store the copy back into the map - - return nil - }) - - if err != nil { + inv := inventoryV1.Cluster{} + if err = json.Unmarshal(data, &inv); err != nil { return nil, err } - return retnodes, nil -} + res := newInventory(c.log.With("kube", "inventory"), inv) -func (c *client) nodeIsActive(node corev1.Node) bool { - ready := false - issues := 0 + return res, nil +} - for _, cond := range node.Status.Conditions { - switch cond.Type { - case 
corev1.NodeReady: - if cond.Status == corev1.ConditionTrue { - ready = true - } - case corev1.NodeMemoryPressure: - fallthrough - case corev1.NodeDiskPressure: - fallthrough - case corev1.NodePIDPressure: - fallthrough - case corev1.NodeNetworkUnavailable: - if cond.Status != corev1.ConditionFalse { - c.log.Error("node in poor condition", - "node", node.Name, - "condition", cond.Type, - "status", cond.Status) - - issues++ - } - } - } +func sParamsEnsureGPU(sparams *crd.SchedulerParams) { + sParamsEnsureResources(sparams) - // If the node has been tainted, don't consider it active. - for _, taint := range node.Spec.Taints { - if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { - issues++ - } + if sparams.Resources.GPU == nil { + sparams.Resources.GPU = &crd.SchedulerResourceGPU{} } - - return ready && issues == 0 } -func isSupportedStorageClass(name string) bool { - switch name { - case "default": - fallthrough - case "beta1": - fallthrough - case "beta2": - fallthrough - case "beta3": - return true - default: - return false +func sParamsEnsureResources(sparams *crd.SchedulerParams) { + if sparams.Resources == nil { + sparams.Resources = &crd.SchedulerResources{} } } diff --git a/cluster/kube/inventory_test.go b/cluster/kube/inventory_test.go index cdafae7b..75367469 100644 --- a/cluster/kube/inventory_test.go +++ b/cluster/kube/inventory_test.go @@ -1,22 +1,30 @@ package kube import ( + "bytes" "context" + "encoding/json" + "io" + "net/http" "testing" - "github.com/pkg/errors" + inventoryV1 "github.com/akash-network/akash-api/go/inventory/v1" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" + "github.com/akash-network/akash-api/go/node/types/unit" + atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" - "github.com/akash-network/akash-api/go/node/types/unit" - atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" - "github.com/akash-network/node/testutil" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + manualfake "k8s.io/client-go/rest/fake" "github.com/akash-network/provider/cluster/kube/builder" ctypes "github.com/akash-network/provider/cluster/types/v1beta3" @@ -63,6 +71,8 @@ func (r *testReservation) ClusterParams() interface{} { return r.cparams } +type proxyCallback func(req *http.Request) (*http.Response, error) + type inventoryScaffold struct { kmock *kubernetesmocks.Interface amock *akashclientfake.Clientset @@ -77,7 +87,7 @@ type inventoryScaffold struct { nsList *v1.NamespaceList } -func makeInventoryScaffold() *inventoryScaffold { +func makeInventoryScaffold(proxycb proxyCallback) *inventoryScaffold { s := &inventoryScaffold{ kmock: &kubernetesmocks.Interface{}, amock: akashclientfake.NewSimpleClientset(), @@ -92,12 +102,19 @@ func makeInventoryScaffold() *inventoryScaffold { nsList: &v1.NamespaceList{}, } + fakeClient := &manualfake.RESTClient{ + GroupVersion: appsv1.SchemeGroupVersion, + NegotiatedSerializer: 
scheme.Codecs, + Client: manualfake.CreateHTTPClient(proxycb), + } + s.kmock.On("CoreV1").Return(s.coreV1Mock) + s.coreV1Mock.On("RESTClient").Return(fakeClient) s.coreV1Mock.On("Namespaces").Return(s.nsInterface, nil) s.coreV1Mock.On("Nodes").Return(s.nodeInterfaceMock, nil) s.coreV1Mock.On("Pods", "" /* all namespaces */).Return(s.podInterfaceMock, nil) - s.coreV1Mock.On("Services", "" /* all namespaces */).Return(s.servicesInterfaceMock, nil) + s.coreV1Mock.On("Services", mock.Anything).Return(s.servicesInterfaceMock, nil) s.nsInterface.On("List", mock.Anything, mock.Anything).Return(s.nsList, nil) @@ -106,20 +123,48 @@ func makeInventoryScaffold() *inventoryScaffold { s.storageV1Interface.On("StorageClasses").Return(s.storageClassesInterface, nil) s.storageClassesInterface.On("List", mock.Anything, mock.Anything).Return(s.storageClassesList, nil) - s.servicesInterfaceMock.On("List", mock.Anything, mock.Anything).Return(&v1.ServiceList{}, nil) + return s +} + +func (s *inventoryScaffold) withInventoryService(fn func(func(string, ...interface{}) *mock.Call)) *inventoryScaffold { + fn(s.servicesInterfaceMock.On) return s } +func defaultInventoryService(on func(string, ...interface{}) *mock.Call) { + svcList := &v1.ServiceList{ + Items: []v1.Service{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "operator-inventory", + Namespace: "akash-services", + }, + Spec: v1.ServiceSpec{}, + Status: v1.ServiceStatus{}, + }, + }, + } + + listOptions := metav1.ListOptions{ + LabelSelector: builder.AkashManagedLabelName + "=true" + + ",app.kubernetes.io/name=akash" + + ",app.kubernetes.io/instance=inventory" + + ",app.kubernetes.io/component=operator", + } + + on("List", mock.Anything, listOptions).Return(svcList, nil) +} + func TestInventoryZero(t *testing.T) { - s := makeInventoryScaffold() + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{} - nodeList := &v1.NodeList{} - listOptions := metav1.ListOptions{} - s.nodeInterfaceMock.On("List", mock.Anything, listOptions).Return(nodeList, nil) + data, _ := json.Marshal(inv) - podList := &v1.PodList{} - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inventory, err := clientInterface.Inventory(context.Background()) @@ -129,51 +174,41 @@ func TestInventoryZero(t *testing.T) { // The inventory was called and the kubernetes client says there are no nodes & no pods. 
Inventory // should be zero require.Len(t, inventory.Metrics().Nodes, 0) - - podListOptionsInCall := s.podInterfaceMock.Calls[0].Arguments[1].(metav1.ListOptions) - require.Equal(t, "status.phase==Running", podListOptionsInCall.FieldSelector) } func TestInventorySingleNodeNoPods(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{} - nodeList.Items = make([]v1.Node, 1) - - nodeResourceList := make(v1.ResourceList) const expectedCPU = 13 - cpuQuantity := resource.NewQuantity(expectedCPU, "m") - nodeResourceList[v1.ResourceCPU] = *cpuQuantity - const expectedMemory = 14 - memoryQuantity := resource.NewQuantity(expectedMemory, "M") - nodeResourceList[v1.ResourceMemory] = *memoryQuantity - const expectedStorage = 15 - ephemeralStorageQuantity := resource.NewQuantity(expectedStorage, "M") - nodeResourceList[v1.ResourceEphemeralStorage] = *ephemeralStorageQuantity - nodeConditions := make([]v1.NodeCondition, 1) - nodeConditions[0] = v1.NodeCondition{ - Type: v1.NodeReady, - Status: v1.ConditionTrue, - } - - nodeList.Items[0] = v1.Node{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{}, - Spec: v1.NodeSpec{}, - Status: v1.NodeStatus{ - Allocatable: nodeResourceList, - Conditions: nodeConditions, - }, - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: inventoryV1.Nodes{ + inventoryV1.Node{ + Name: "test", + Resources: inventoryV1.NodeResources{ + CPU: inventoryV1.CPU{ + Quantity: inventoryV1.NewResourcePair(expectedCPU, 0, "m"), + }, + Memory: inventoryV1.Memory{ + Quantity: inventoryV1.NewResourcePair(expectedMemory, 0, "M"), + }, + GPU: inventoryV1.GPU{ + Quantity: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + }, + EphemeralStorage: inventoryV1.NewResourcePair(expectedStorage, 0, "M"), + VolumesAttached: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + VolumesMounted: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + }, + Capabilities: inventoryV1.NodeCapabilities{}, + }, + }, + } - listOptions := metav1.ListOptions{} - s.nodeInterfaceMock.On("List", mock.Anything, listOptions).Return(nodeList, nil) + data, _ := json.Marshal(inv) - podList := &v1.PodList{} - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inventory, err := clientInterface.Inventory(context.Background()) @@ -191,121 +226,43 @@ func TestInventorySingleNodeNoPods(t *testing.T) { } func TestInventorySingleNodeWithPods(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{} - nodeList.Items = make([]v1.Node, 1) - - nodeResourceList := make(v1.ResourceList) const expectedCPU = 13 - cpuQuantity := resource.NewQuantity(expectedCPU, "m") - nodeResourceList[v1.ResourceCPU] = *cpuQuantity - const expectedMemory = 2048 - memoryQuantity := resource.NewQuantity(expectedMemory, "M") - nodeResourceList[v1.ResourceMemory] = *memoryQuantity - const expectedStorage = 4096 - ephemeralStorageQuantity := resource.NewQuantity(expectedStorage, "M") - nodeResourceList[v1.ResourceEphemeralStorage] = *ephemeralStorageQuantity - - nodeConditions := make([]v1.NodeCondition, 1) - nodeConditions[0] = v1.NodeCondition{ - Type: v1.NodeReady, - Status: v1.ConditionTrue, - } - - nodeList.Items[0] = v1.Node{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{}, - 
Spec: v1.NodeSpec{}, - Status: v1.NodeStatus{ - Allocatable: nodeResourceList, - Conditions: nodeConditions, - }, - } - - listOptions := metav1.ListOptions{} - s.nodeInterfaceMock.On("List", mock.Anything, listOptions).Return(nodeList, nil) const cpuPerContainer = 1 const memoryPerContainer = 3 const storagePerContainer = 17 - // Define two pods - pods := make([]v1.Pod, 2) - // First pod has 1 container - podContainers := make([]v1.Container, 1) - containerRequests := make(v1.ResourceList) - cpuQuantity.SetMilli(cpuPerContainer) - containerRequests[v1.ResourceCPU] = *cpuQuantity - - memoryQuantity = resource.NewQuantity(memoryPerContainer, "M") - containerRequests[v1.ResourceMemory] = *memoryQuantity - - ephemeralStorageQuantity = resource.NewQuantity(storagePerContainer, "M") - containerRequests[v1.ResourceEphemeralStorage] = *ephemeralStorageQuantity - - podContainers[0] = v1.Container{ - Resources: v1.ResourceRequirements{ - Limits: nil, - Requests: containerRequests, - }, - } - pods[0] = v1.Pod{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{}, - Spec: v1.PodSpec{ - Containers: podContainers, - }, - Status: v1.PodStatus{}, - } - - // Define 2nd pod with multiple containers - podContainers = make([]v1.Container, 2) - for i := range podContainers { - containerRequests := make(v1.ResourceList) - cpuQuantity.SetMilli(cpuPerContainer) - containerRequests[v1.ResourceCPU] = *cpuQuantity - - memoryQuantity = resource.NewQuantity(memoryPerContainer, "M") - containerRequests[v1.ResourceMemory] = *memoryQuantity - - ephemeralStorageQuantity = resource.NewQuantity(storagePerContainer, "M") - containerRequests[v1.ResourceEphemeralStorage] = *ephemeralStorageQuantity - - // Container limits are enforced by kubernetes as absolute limits, but not - // used when considering inventory since overcommit is possible in a kubernetes cluster - // Set limits to any value larger than requests in this test since it should not change - // the value returned by the code - containerLimits := make(v1.ResourceList) - - for k, v := range containerRequests { - replacementV := resource.NewQuantity(0, "") - replacementV.Set(v.Value() * int64(testutil.RandRangeInt(2, 100))) - containerLimits[k] = *replacementV - } - - podContainers[i] = v1.Container{ - Resources: v1.ResourceRequirements{ - Limits: containerLimits, - Requests: containerRequests, + const totalContainers = 3 + + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: inventoryV1.Nodes{ + inventoryV1.Node{ + Name: "test", + Resources: inventoryV1.NodeResources{ + CPU: inventoryV1.CPU{ + Quantity: inventoryV1.NewResourcePair(expectedCPU, cpuPerContainer*totalContainers, "m"), + }, + Memory: inventoryV1.Memory{ + Quantity: inventoryV1.NewResourcePair(expectedMemory, memoryPerContainer*totalContainers, "M"), + }, + GPU: inventoryV1.GPU{ + Quantity: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + }, + EphemeralStorage: inventoryV1.NewResourcePair(expectedStorage, storagePerContainer*totalContainers, "M"), + VolumesAttached: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + VolumesMounted: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + }, + Capabilities: inventoryV1.NodeCapabilities{}, + }, }, } - } - pods[1] = v1.Pod{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{}, - Spec: v1.PodSpec{ - Containers: podContainers, - }, - Status: v1.PodStatus{}, - } - podList := &v1.PodList{ - Items: pods, - } + data, _ := json.Marshal(inv) - 
s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inventory, err := clientInterface.Inventory(context.Background()) @@ -317,52 +274,48 @@ func TestInventorySingleNodeWithPods(t *testing.T) { node := inventory.Metrics().Nodes[0] availableResources := node.Available // Multiply expected value by 1000 since millicpu is used - require.Equal(t, uint64(expectedCPU*1000)-3*cpuPerContainer, availableResources.CPU) - require.Equal(t, uint64(expectedMemory)-3*memoryPerContainer, availableResources.Memory) - require.Equal(t, uint64(expectedStorage)-3*storagePerContainer, availableResources.StorageEphemeral) + assert.Equal(t, (uint64(expectedCPU)-(totalContainers*cpuPerContainer))*1000, availableResources.CPU) + assert.Equal(t, uint64(expectedMemory)-totalContainers*memoryPerContainer, availableResources.Memory) + assert.Equal(t, uint64(expectedStorage)-totalContainers*storagePerContainer, availableResources.StorageEphemeral) } -var errForTest = errors.New("error in test") - func TestInventoryWithNodeError(t *testing.T) { - s := makeInventoryScaffold() - - listOptions := metav1.ListOptions{} - s.nodeInterfaceMock.On("List", mock.Anything, listOptions).Return(nil, errForTest) + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: http.StatusServiceUnavailable, Body: io.NopCloser(&bytes.Buffer{})}, nil + }).withInventoryService(func(on func(string, ...interface{}) *mock.Call) { + on("List", mock.Anything, mock.Anything).Return(&v1.ServiceList{}, kerrors.NewNotFound(schema.GroupResource{}, "test-name")) + }) clientInterface := clientForTest(t, s.kmock, s.amock) inventory, err := clientInterface.Inventory(context.Background()) require.Error(t, err) - require.True(t, errors.Is(err, errForTest)) + require.True(t, kerrors.IsNotFound(err)) require.Nil(t, inventory) } func TestInventoryWithPodsError(t *testing.T) { - s := makeInventoryScaffold() - - listOptions := metav1.ListOptions{} - nodeList := &v1.NodeList{} - s.nodeInterfaceMock.On("List", mock.Anything, listOptions).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nil, errForTest) + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: http.StatusServiceUnavailable, Body: io.NopCloser(&bytes.Buffer{})}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inventory, err := clientInterface.Inventory(context.Background()) + require.Error(t, err) - require.True(t, errors.Is(err, errForTest)) - require.Nil(t, inventory) + require.True(t, kerrors.IsServiceUnavailable(err)) + assert.Nil(t, inventory) } func TestInventoryMultipleReplicasFulFilled1(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: 
io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -387,16 +340,15 @@ func TestInventoryMultipleReplicasFulFilled1(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled2(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -409,16 +361,15 @@ func TestInventoryMultipleReplicasFulFilled2(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled3(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -431,16 +382,15 @@ func TestInventoryMultipleReplicasFulFilled3(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled4(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -453,16 +403,15 @@ func TestInventoryMultipleReplicasFulFilled4(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled5(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, 
mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -475,16 +424,15 @@ func TestInventoryMultipleReplicasFulFilled5(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled6(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -497,16 +445,15 @@ func TestInventoryMultipleReplicasFulFilled6(t *testing.T) { } func TestInventoryMultipleReplicasFulFilled7(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -519,16 +466,15 @@ func TestInventoryMultipleReplicasFulFilled7(t *testing.T) { } func TestInventoryMultipleReplicasOutOfCapacity1(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) - s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil + }).withInventoryService(defaultInventoryService) clientInterface := clientForTest(t, s.kmock, s.amock) inv, err := clientInterface.Inventory(context.Background()) @@ -542,16 +488,15 @@ func TestInventoryMultipleReplicasOutOfCapacity1(t *testing.T) { } func TestInventoryMultipleReplicasOutOfCapacity2(t *testing.T) { - s := makeInventoryScaffold() - - nodeList := &v1.NodeList{ - Items: multipleReplicasGenNodes(), - } + s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) { + inv := inventoryV1.Cluster{ + Nodes: multipleReplicasGenNodes(), + } - podList := &v1.PodList{Items: []v1.Pod{}} + data, _ := json.Marshal(inv) - s.nodeInterfaceMock.On("List", mock.Anything, 
mock.Anything).Return(nodeList, nil)
-	s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil)
+		return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil
+	}).withInventoryService(defaultInventoryService)
 
 	clientInterface := clientForTest(t, s.kmock, s.amock)
 
 	inv, err := clientInterface.Inventory(context.Background())
@@ -565,16 +510,15 @@
 }
 
 func TestInventoryMultipleReplicasOutOfCapacity4(t *testing.T) {
-	s := makeInventoryScaffold()
-
-	nodeList := &v1.NodeList{
-		Items: multipleReplicasGenNodes(),
-	}
+	s := makeInventoryScaffold(func(req *http.Request) (*http.Response, error) {
+		inv := inventoryV1.Cluster{
+			Nodes: multipleReplicasGenNodes(),
+		}
 
-	podList := &v1.PodList{Items: []v1.Pod{}}
+		data, _ := json.Marshal(inv)
 
-	s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil)
-	s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil)
+		return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewBuffer(data))}, nil
+	}).withInventoryService(defaultInventoryService)
 
 	clientInterface := clientForTest(t, s.kmock, s.amock)
 
 	inv, err := clientInterface.Inventory(context.Background())
@@ -587,117 +531,98 @@ func TestInventoryMultipleReplicasOutOfCapacity4(t *testing.T) {
 
 	require.EqualError(t, ctypes.ErrInsufficientCapacity, err.Error())
 }
 
-func TestParseCapabilities(t *testing.T) {
-	type testCase struct {
-		labels          map[string]string
-		expCapabilities *crd.NodeInfoCapabilities
-	}
-
-	tests := []testCase{
-		{
-			labels: map[string]string{
-				"akash.network/capabilities.gpu.vendor.nvidia.model.a100": "true",
-			},
-			expCapabilities: &crd.NodeInfoCapabilities{
-				GPU: crd.GPUCapabilities{
-					Vendor: "nvidia",
-					Model:  "a100",
-				},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		caps := parseNodeCapabilities(test.labels, nil)
-		require.Equal(t, test.expCapabilities, caps)
-	}
-}
-
 // multipleReplicasGenNodes generates four nodes with the following CPUs available
 //
 // node1: 68780
 // node2: 68800
 // node3: 119525
 // node4: 119495
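NOTE(editor): the per-node CPU figures in the comment above follow from the
fixture below, assuming available = allocatable - allocated on the milli-CPU
ResourcePair; a quick check of the arithmetic:

	119800 - 51020 = 68780   (node1)
	119800 - 51000 = 68800   (node2)
	119800 -   275 = 119525  (node3)
	119800 -   305 = 119495  (node4)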
-func multipleReplicasGenNodes() []v1.Node {
-	nodeCapacity := make(v1.ResourceList)
-	nodeCapacity[v1.ResourceCPU] = *(resource.NewMilliQuantity(119800, resource.DecimalSI))
-	nodeCapacity[v1.ResourceMemory] = *(resource.NewQuantity(474813259776, resource.DecimalSI))
-	nodeCapacity[v1.ResourceEphemeralStorage] = *(resource.NewQuantity(7760751097705, resource.DecimalSI))
-
-	nodeConditions := make([]v1.NodeCondition, 1)
-	nodeConditions[0] = v1.NodeCondition{
-		Type:   v1.NodeReady,
-		Status: v1.ConditionTrue,
-	}
-
-	return []v1.Node{
+func multipleReplicasGenNodes() inventoryV1.Nodes {
+	return inventoryV1.Nodes{
 		{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "node1",
-			},
-			Spec: v1.NodeSpec{},
-			Status: v1.NodeStatus{
-				Allocatable: v1.ResourceList{
-					v1.ResourceCPU:              *(resource.NewMilliQuantity(68780, resource.DecimalSI)),
-					v1.ResourceMemory:           *(resource.NewQuantity(457317732352, resource.DecimalSI)),
-					v1.ResourceEphemeralStorage: *(resource.NewQuantity(7752161163113, resource.DecimalSI)),
+			Name: "node1",
+			Resources: inventoryV1.NodeResources{
+				CPU: inventoryV1.CPU{
+					Quantity: inventoryV1.NewResourcePairMilli(119800, 51020, resource.DecimalSI),
+				},
+				Memory: inventoryV1.Memory{
+					Quantity: inventoryV1.NewResourcePair(457317732352, 17495527424, resource.DecimalSI),
 				},
-				Capacity:   nodeCapacity,
-				Conditions: nodeConditions,
+				GPU: inventoryV1.GPU{
+					Quantity: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
+				},
+				EphemeralStorage: inventoryV1.NewResourcePair(7760751097705, 8589934592, resource.DecimalSI),
+				VolumesAttached:  inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
+				VolumesMounted:   inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
 			},
 		},
 		{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "node2",
-				Labels: map[string]string{
-					"akash.network/capabilities.gpu.vendor.nvidia.model.a100": "true",
+			Name: "node2",
+			Resources: inventoryV1.NodeResources{
+				CPU: inventoryV1.CPU{
+					Quantity: inventoryV1.NewResourcePairMilli(119800, 51000, resource.DecimalSI),
 				},
-			},
-			Spec: v1.NodeSpec{},
-			Status: v1.NodeStatus{
-				Allocatable: v1.ResourceList{
-					v1.ResourceCPU:              *(resource.NewMilliQuantity(68800, resource.DecimalSI)),
-					builder.ResourceGPUNvidia:   *(resource.NewQuantity(2, resource.DecimalSI)),
-					v1.ResourceMemory:           *(resource.NewQuantity(457328218112, resource.DecimalSI)),
-					v1.ResourceEphemeralStorage: *(resource.NewQuantity(7752161163113, resource.DecimalSI)),
+				Memory: inventoryV1.Memory{
+					Quantity: inventoryV1.NewResourcePair(457317732352, 17495527424, resource.DecimalSI),
+				},
+				GPU: inventoryV1.GPU{
+					Quantity: inventoryV1.NewResourcePair(2, 0, resource.DecimalSI),
+					Info: inventoryV1.GPUInfoS{
+						{
+							Vendor:     "nvidia",
+							VendorID:   "10de",
+							Name:       "a100",
+							ModelID:    "20b5",
+							Interface:  "pcie",
+							MemorySize: "80Gi",
+						},
+						{
+							Vendor:     "nvidia",
+							VendorID:   "10de",
+							Name:       "a100",
+							ModelID:    "20b5",
+							Interface:  "pcie",
+							MemorySize: "80Gi",
+						},
+					},
 				},
-				Capacity:   nodeCapacity,
-				Conditions: nodeConditions,
+				EphemeralStorage: inventoryV1.NewResourcePair(7760751097705, 8589934592, resource.DecimalSI),
+				VolumesAttached:  inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
+				VolumesMounted:   inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
 			},
 		},
 		{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "node3",
-			},
-			Spec: v1.NodeSpec{},
-			Status: v1.NodeStatus{
-				Allocatable: v1.ResourceList{
-					v1.ResourceCPU:              *(resource.NewMilliQuantity(119525, resource.DecimalSI)),
-					v1.ResourceMemory:           *(resource.NewQuantity(474817923072, resource.DecimalSI)),
-					v1.ResourceEphemeralStorage: *(resource.NewQuantity(7760751097705, resource.DecimalSI)),
+			Name: "node3",
+			Resources: inventoryV1.NodeResources{
+				CPU: inventoryV1.CPU{
+					Quantity: inventoryV1.NewResourcePairMilli(119800, 275, resource.DecimalSI),
+				},
+				Memory: inventoryV1.Memory{
+					Quantity: inventoryV1.NewResourcePair(457317732352, 17495527424, resource.DecimalSI),
 				},
-				Capacity:   nodeCapacity,
-				Conditions: nodeConditions,
+				GPU: inventoryV1.GPU{
+					Quantity: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
+				},
+				EphemeralStorage: inventoryV1.NewResourcePair(7760751097705, 0, resource.DecimalSI),
+				VolumesAttached:  inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
+				VolumesMounted:   inventoryV1.NewResourcePair(0, 0, resource.DecimalSI),
 			},
 		},
 		{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "node4",
-			},
-			Spec: v1.NodeSpec{},
-			Status: v1.NodeStatus{
-				Allocatable: v1.ResourceList{
-					v1.ResourceCPU:              *(resource.NewMilliQuantity(119495, resource.DecimalSI)),
-					v1.ResourceMemory:           *(resource.NewQuantity(474753923072, resource.DecimalSI)),
-					v1.ResourceEphemeralStorage: *(resource.NewQuantity(7760751097705, resource.DecimalSI)),
+			Name: "node4",
+			Resources: inventoryV1.NodeResources{
+				CPU: inventoryV1.CPU{
+					Quantity: inventoryV1.NewResourcePairMilli(119800, 305, resource.DecimalSI),
+
}, + Memory: inventoryV1.Memory{ + Quantity: inventoryV1.NewResourcePair(457317732352, 17495527424, resource.DecimalSI), + }, + GPU: inventoryV1.GPU{ + Quantity: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), }, - Capacity: nodeCapacity, - Conditions: nodeConditions, + EphemeralStorage: inventoryV1.NewResourcePair(7760751097705, 0, resource.DecimalSI), + VolumesAttached: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), + VolumesMounted: inventoryV1.NewResourcePair(0, 0, resource.DecimalSI), }, }, } diff --git a/cluster/kube/resourcetypes.go b/cluster/kube/resourcetypes.go index 773b71e5..eb3bbb6d 100644 --- a/cluster/kube/resourcetypes.go +++ b/cluster/kube/resourcetypes.go @@ -7,8 +7,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" types "github.com/akash-network/akash-api/go/node/types/v1beta3" - - crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" ) type resourcePair struct { @@ -16,39 +14,39 @@ type resourcePair struct { allocated resource.Quantity } -type clusterStorage map[string]*resourcePair - -func (cs clusterStorage) dup() clusterStorage { - res := make(clusterStorage) - for class, resources := range cs { - res[class] = resources.dup() - } - - return res -} - -func newResourcePair(allocatable, allocated resource.Quantity) resourcePair { - rp := resourcePair{ - allocatable: allocatable, - allocated: allocated, - } - - return rp -} - -func rpNewFromAkash(res crd.ResourcePair) *resourcePair { - return &resourcePair{ - allocatable: *resource.NewQuantity(int64(res.Allocatable), resource.DecimalSI), - allocated: *resource.NewQuantity(int64(res.Allocated), resource.DecimalSI), - } -} - -func (rp *resourcePair) dup() *resourcePair { - return &resourcePair{ - allocatable: rp.allocatable.DeepCopy(), - allocated: rp.allocated.DeepCopy(), - } -} +// type clusterStorage map[string]*resourcePair +// +// func (cs clusterStorage) dup() clusterStorage { +// res := make(clusterStorage) +// for class, resources := range cs { +// res[class] = resources.dup() +// } +// +// return res +// } +// +// func newResourcePair(allocatable, allocated resource.Quantity) resourcePair { +// rp := resourcePair{ +// allocatable: allocatable, +// allocated: allocated, +// } +// +// return rp +// } +// +// func rpNewFromAkash(res crd.ResourcePair) *resourcePair { +// return &resourcePair{ +// allocatable: *resource.NewQuantity(int64(res.Allocatable), resource.DecimalSI), +// allocated: *resource.NewQuantity(int64(res.Allocated), resource.DecimalSI), +// } +// } +// +// func (rp *resourcePair) dup() *resourcePair { +// return &resourcePair{ +// allocatable: rp.allocatable.DeepCopy(), +// allocated: rp.allocated.DeepCopy(), +// } +// } func (rp *resourcePair) subMilliNLZ(val types.ResourceValue) bool { avail := rp.available() diff --git a/cluster/mocks/client.go b/cluster/mocks/client.go index 0700c862..ac7b28a9 100644 --- a/cluster/mocks/client.go +++ b/cluster/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -39,6 +39,10 @@ func (_m *Client) EXPECT() *Client_Expecter { func (_m *Client) AllHostnames(_a0 context.Context) ([]v1beta3.ActiveHostname, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for AllHostnames") + } + var r0 []v1beta3.ActiveHostname var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.ActiveHostname, error)); ok { @@ -93,6 +97,10 @@ func (_c *Client_AllHostnames_Call) RunAndReturn(run func(context.Context) ([]v1 func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective) error { ret := _m.Called(ctx, directive) + if len(ret) == 0 { + panic("no return value specified for ConnectHostnameToDeployment") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective) error); ok { r0 = rf(ctx, directive) @@ -136,6 +144,10 @@ func (_c *Client_ConnectHostnameToDeployment_Call) RunAndReturn(run func(context func (_m *Client) DeclareHostname(ctx context.Context, lID v1beta4.LeaseID, host string, serviceName string, externalPort uint32) error { ret := _m.Called(ctx, lID, host, serviceName, externalPort) + if len(ret) == 0 { + panic("no return value specified for DeclareHostname") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, string, uint32) error); ok { r0 = rf(ctx, lID, host, serviceName, externalPort) @@ -182,6 +194,10 @@ func (_c *Client_DeclareHostname_Call) RunAndReturn(run func(context.Context, v1 func (_m *Client) DeclareIP(ctx context.Context, lID v1beta4.LeaseID, serviceName string, port uint32, externalPort uint32, proto v2beta2.ServiceProtocol, sharingKey string, overwrite bool) error { ret := _m.Called(ctx, lID, serviceName, port, externalPort, proto, sharingKey, overwrite) + if len(ret) == 0 { + panic("no return value specified for DeclareIP") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, uint32, uint32, v2beta2.ServiceProtocol, string, bool) error); ok { r0 = rf(ctx, lID, serviceName, port, externalPort, proto, sharingKey, overwrite) @@ -231,6 +247,10 @@ func (_c *Client_DeclareIP_Call) RunAndReturn(run func(context.Context, v1beta4. 
func (_m *Client) Deploy(ctx context.Context, deployment v1beta3.IDeployment) error { ret := _m.Called(ctx, deployment) + if len(ret) == 0 { + panic("no return value specified for Deploy") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.IDeployment) error); ok { r0 = rf(ctx, deployment) @@ -274,6 +294,10 @@ func (_c *Client_Deploy_Call) RunAndReturn(run func(context.Context, v1beta3.IDe func (_m *Client) Deployments(_a0 context.Context) ([]v1beta3.IDeployment, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Deployments") + } + var r0 []v1beta3.IDeployment var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.IDeployment, error)); ok { @@ -328,6 +352,10 @@ func (_c *Client_Deployments_Call) RunAndReturn(run func(context.Context) ([]v1b func (_m *Client) Exec(ctx context.Context, lID v1beta4.LeaseID, service string, podIndex uint, cmd []string, stdin io.Reader, stdout io.Writer, stderr io.Writer, tty bool, tsq remotecommand.TerminalSizeQueue) (v1beta3.ExecResult, error) { ret := _m.Called(ctx, lID, service, podIndex, cmd, stdin, stdout, stderr, tty, tsq) + if len(ret) == 0 { + panic("no return value specified for Exec") + } + var r0 v1beta3.ExecResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, uint, []string, io.Reader, io.Writer, io.Writer, bool, remotecommand.TerminalSizeQueue) (v1beta3.ExecResult, error)); ok { @@ -391,6 +419,10 @@ func (_c *Client_Exec_Call) RunAndReturn(run func(context.Context, v1beta4.Lease func (_m *Client) ForwardedPortStatus(_a0 context.Context, _a1 v1beta4.LeaseID) (map[string][]v1beta3.ForwardedPortStatus, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ForwardedPortStatus") + } + var r0 map[string][]v1beta3.ForwardedPortStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) (map[string][]v1beta3.ForwardedPortStatus, error)); ok { @@ -446,6 +478,10 @@ func (_c *Client_ForwardedPortStatus_Call) RunAndReturn(run func(context.Context func (_m *Client) GetDeclaredIPs(ctx context.Context, leaseID v1beta4.LeaseID) ([]akash_networkv2beta2.ProviderLeasedIPSpec, error) { ret := _m.Called(ctx, leaseID) + if len(ret) == 0 { + panic("no return value specified for GetDeclaredIPs") + } + var r0 []akash_networkv2beta2.ProviderLeasedIPSpec var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) ([]akash_networkv2beta2.ProviderLeasedIPSpec, error)); ok { @@ -501,6 +537,10 @@ func (_c *Client_GetDeclaredIPs_Call) RunAndReturn(run func(context.Context, v1b func (_m *Client) GetHostnameDeploymentConnections(ctx context.Context) ([]v1beta3.LeaseIDHostnameConnection, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetHostnameDeploymentConnections") + } + var r0 []v1beta3.LeaseIDHostnameConnection var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.LeaseIDHostnameConnection, error)); ok { @@ -555,6 +595,10 @@ func (_c *Client_GetHostnameDeploymentConnections_Call) RunAndReturn(run func(co func (_m *Client) GetManifestGroup(_a0 context.Context, _a1 v1beta4.LeaseID) (bool, akash_networkv2beta2.ManifestGroup, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetManifestGroup") + } + var r0 bool var r1 akash_networkv2beta2.ManifestGroup var r2 error @@ -615,6 +659,10 @@ func (_c *Client_GetManifestGroup_Call) RunAndReturn(run func(context.Context, v func (_m 
*Client) Inventory(_a0 context.Context) (v1beta3.Inventory, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Inventory") + } + var r0 v1beta3.Inventory var r1 error if rf, ok := ret.Get(0).(func(context.Context) (v1beta3.Inventory, error)); ok { @@ -669,6 +717,10 @@ func (_c *Client_Inventory_Call) RunAndReturn(run func(context.Context) (v1beta3 func (_m *Client) KubeVersion() (*version.Info, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for KubeVersion") + } + var r0 *version.Info var r1 error if rf, ok := ret.Get(0).(func() (*version.Info, error)); ok { @@ -722,6 +774,10 @@ func (_c *Client_KubeVersion_Call) RunAndReturn(run func() (*version.Info, error func (_m *Client) LeaseEvents(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string, _a3 bool) (v1beta3.EventsWatcher, error) { ret := _m.Called(_a0, _a1, _a2, _a3) + if len(ret) == 0 { + panic("no return value specified for LeaseEvents") + } + var r0 v1beta3.EventsWatcher var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, bool) (v1beta3.EventsWatcher, error)); ok { @@ -779,6 +835,10 @@ func (_c *Client_LeaseEvents_Call) RunAndReturn(run func(context.Context, v1beta func (_m *Client) LeaseLogs(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string, _a3 bool, _a4 *int64) ([]*v1beta3.ServiceLog, error) { ret := _m.Called(_a0, _a1, _a2, _a3, _a4) + if len(ret) == 0 { + panic("no return value specified for LeaseLogs") + } + var r0 []*v1beta3.ServiceLog var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, bool, *int64) ([]*v1beta3.ServiceLog, error)); ok { @@ -837,6 +897,10 @@ func (_c *Client_LeaseLogs_Call) RunAndReturn(run func(context.Context, v1beta4. func (_m *Client) LeaseStatus(_a0 context.Context, _a1 v1beta4.LeaseID) (map[string]*v1beta3.ServiceStatus, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LeaseStatus") + } + var r0 map[string]*v1beta3.ServiceStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) (map[string]*v1beta3.ServiceStatus, error)); ok { @@ -892,6 +956,10 @@ func (_c *Client_LeaseStatus_Call) RunAndReturn(run func(context.Context, v1beta func (_m *Client) ObserveHostnameState(ctx context.Context) (<-chan v1beta3.HostnameResourceEvent, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ObserveHostnameState") + } + var r0 <-chan v1beta3.HostnameResourceEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan v1beta3.HostnameResourceEvent, error)); ok { @@ -946,6 +1014,10 @@ func (_c *Client_ObserveHostnameState_Call) RunAndReturn(run func(context.Contex func (_m *Client) ObserveIPState(ctx context.Context) (<-chan v1beta3.IPResourceEvent, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ObserveIPState") + } + var r0 <-chan v1beta3.IPResourceEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan v1beta3.IPResourceEvent, error)); ok { @@ -1000,6 +1072,10 @@ func (_c *Client_ObserveIPState_Call) RunAndReturn(run func(context.Context) (<- func (_m *Client) PurgeDeclaredHostname(ctx context.Context, lID v1beta4.LeaseID, hostname string) error { ret := _m.Called(ctx, lID, hostname) + if len(ret) == 0 { + panic("no return value specified for PurgeDeclaredHostname") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string) error); ok { r0 = rf(ctx, lID, 
hostname) @@ -1044,6 +1120,10 @@ func (_c *Client_PurgeDeclaredHostname_Call) RunAndReturn(run func(context.Conte func (_m *Client) PurgeDeclaredHostnames(ctx context.Context, lID v1beta4.LeaseID) error { ret := _m.Called(ctx, lID) + if len(ret) == 0 { + panic("no return value specified for PurgeDeclaredHostnames") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) error); ok { r0 = rf(ctx, lID) @@ -1087,6 +1167,10 @@ func (_c *Client_PurgeDeclaredHostnames_Call) RunAndReturn(run func(context.Cont func (_m *Client) PurgeDeclaredIP(ctx context.Context, lID v1beta4.LeaseID, serviceName string, externalPort uint32, proto v2beta2.ServiceProtocol) error { ret := _m.Called(ctx, lID, serviceName, externalPort, proto) + if len(ret) == 0 { + panic("no return value specified for PurgeDeclaredIP") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, uint32, v2beta2.ServiceProtocol) error); ok { r0 = rf(ctx, lID, serviceName, externalPort, proto) @@ -1133,6 +1217,10 @@ func (_c *Client_PurgeDeclaredIP_Call) RunAndReturn(run func(context.Context, v1 func (_m *Client) PurgeDeclaredIPs(ctx context.Context, lID v1beta4.LeaseID) error { ret := _m.Called(ctx, lID) + if len(ret) == 0 { + panic("no return value specified for PurgeDeclaredIPs") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) error); ok { r0 = rf(ctx, lID) @@ -1176,6 +1264,10 @@ func (_c *Client_PurgeDeclaredIPs_Call) RunAndReturn(run func(context.Context, v func (_m *Client) RemoveHostnameFromDeployment(ctx context.Context, hostname string, leaseID v1beta4.LeaseID, allowMissing bool) error { ret := _m.Called(ctx, hostname, leaseID, allowMissing) + if len(ret) == 0 { + panic("no return value specified for RemoveHostnameFromDeployment") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, v1beta4.LeaseID, bool) error); ok { r0 = rf(ctx, hostname, leaseID, allowMissing) @@ -1221,6 +1313,10 @@ func (_c *Client_RemoveHostnameFromDeployment_Call) RunAndReturn(run func(contex func (_m *Client) ServiceStatus(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string) (*v1beta3.ServiceStatus, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for ServiceStatus") + } + var r0 *v1beta3.ServiceStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string) (*v1beta3.ServiceStatus, error)); ok { @@ -1277,6 +1373,10 @@ func (_c *Client_ServiceStatus_Call) RunAndReturn(run func(context.Context, v1be func (_m *Client) TeardownLease(_a0 context.Context, _a1 v1beta4.LeaseID) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for TeardownLease") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) error); ok { r0 = rf(_a0, _a1) diff --git a/cluster/mocks/cluster.go b/cluster/mocks/cluster.go index ada1e1ae..395b8d69 100644 --- a/cluster/mocks/cluster.go +++ b/cluster/mocks/cluster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -27,6 +27,10 @@ func (_m *Cluster) EXPECT() *Cluster_Expecter { func (_m *Cluster) Reserve(_a0 v1beta4.OrderID, _a1 v1beta3.ResourceGroup) (typesv1beta3.Reservation, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Reserve") + } + var r0 typesv1beta3.Reservation var r1 error if rf, ok := ret.Get(0).(func(v1beta4.OrderID, v1beta3.ResourceGroup) (typesv1beta3.Reservation, error)); ok { @@ -82,6 +86,10 @@ func (_c *Cluster_Reserve_Call) RunAndReturn(run func(v1beta4.OrderID, v1beta3.R func (_m *Cluster) Unreserve(_a0 v1beta4.OrderID) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Unreserve") + } + var r0 error if rf, ok := ret.Get(0).(func(v1beta4.OrderID) error); ok { r0 = rf(_a0) diff --git a/cluster/mocks/hostname_service_client.go b/cluster/mocks/hostname_service_client.go index 39393e24..53276cae 100644 --- a/cluster/mocks/hostname_service_client.go +++ b/cluster/mocks/hostname_service_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. package mocks @@ -28,6 +28,10 @@ func (_m *HostnameServiceClient) EXPECT() *HostnameServiceClient_Expecter { func (_m *HostnameServiceClient) CanReserveHostnames(hostnames []string, ownerAddr types.Address) error { ret := _m.Called(hostnames, ownerAddr) + if len(ret) == 0 { + panic("no return value specified for CanReserveHostnames") + } + var r0 error if rf, ok := ret.Get(0).(func([]string, types.Address) error); ok { r0 = rf(hostnames, ownerAddr) @@ -71,6 +75,10 @@ func (_c *HostnameServiceClient_CanReserveHostnames_Call) RunAndReturn(run func( func (_m *HostnameServiceClient) PrepareHostnamesForTransfer(ctx context.Context, hostnames []string, leaseID v1beta4.LeaseID) error { ret := _m.Called(ctx, hostnames, leaseID) + if len(ret) == 0 { + panic("no return value specified for PrepareHostnamesForTransfer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []string, v1beta4.LeaseID) error); ok { r0 = rf(ctx, hostnames, leaseID) @@ -115,6 +123,10 @@ func (_c *HostnameServiceClient_PrepareHostnamesForTransfer_Call) RunAndReturn(r func (_m *HostnameServiceClient) ReleaseHostnames(leaseID v1beta4.LeaseID) error { ret := _m.Called(leaseID) + if len(ret) == 0 { + panic("no return value specified for ReleaseHostnames") + } + var r0 error if rf, ok := ret.Get(0).(func(v1beta4.LeaseID) error); ok { r0 = rf(leaseID) @@ -157,6 +169,10 @@ func (_c *HostnameServiceClient_ReleaseHostnames_Call) RunAndReturn(run func(v1b func (_m *HostnameServiceClient) ReserveHostnames(ctx context.Context, hostnames []string, leaseID v1beta4.LeaseID) ([]string, error) { ret := _m.Called(ctx, hostnames, leaseID) + if len(ret) == 0 { + panic("no return value specified for ReserveHostnames") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, []string, v1beta4.LeaseID) ([]string, error)); ok { diff --git a/cluster/mocks/i_deployment.go b/cluster/mocks/i_deployment.go index 83a75e6c..9fbf046e 100644 --- a/cluster/mocks/i_deployment.go +++ b/cluster/mocks/i_deployment.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -27,6 +27,10 @@ func (_m *IDeployment) EXPECT() *IDeployment_Expecter { func (_m *IDeployment) ClusterParams() interface{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ClusterParams") + } + var r0 interface{} if rf, ok := ret.Get(0).(func() interface{}); ok { r0 = rf() @@ -70,6 +74,10 @@ func (_c *IDeployment_ClusterParams_Call) RunAndReturn(run func() interface{}) * func (_m *IDeployment) LeaseID() v1beta4.LeaseID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LeaseID") + } + var r0 v1beta4.LeaseID if rf, ok := ret.Get(0).(func() v1beta4.LeaseID); ok { r0 = rf() @@ -111,6 +119,10 @@ func (_c *IDeployment_LeaseID_Call) RunAndReturn(run func() v1beta4.LeaseID) *ID func (_m *IDeployment) ManifestGroup() *v2beta2.Group { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ManifestGroup") + } + var r0 *v2beta2.Group if rf, ok := ret.Get(0).(func() *v2beta2.Group); ok { r0 = rf() diff --git a/cluster/mocks/ip_operator_client.go b/cluster/mocks/ip_operator_client.go index 01772608..1c1de5e1 100644 --- a/cluster/mocks/ip_operator_client.go +++ b/cluster/mocks/ip_operator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. package mocks @@ -29,6 +29,10 @@ func (_m *IPOperatorClient) EXPECT() *IPOperatorClient_Expecter { func (_m *IPOperatorClient) Check(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Check") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -71,6 +75,10 @@ func (_c *IPOperatorClient_Check_Call) RunAndReturn(run func(context.Context) er func (_m *IPOperatorClient) GetIPAddressStatus(ctx context.Context, orderID v1beta4.OrderID) ([]types.LeaseIPStatus, error) { ret := _m.Called(ctx, orderID) + if len(ret) == 0 { + panic("no return value specified for GetIPAddressStatus") + } + var r0 []types.LeaseIPStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.OrderID) ([]types.LeaseIPStatus, error)); ok { @@ -126,6 +134,10 @@ func (_c *IPOperatorClient_GetIPAddressStatus_Call) RunAndReturn(run func(contex func (_m *IPOperatorClient) GetIPAddressUsage(ctx context.Context) (types.IPAddressUsage, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetIPAddressUsage") + } + var r0 types.IPAddressUsage var r1 error if rf, ok := ret.Get(0).(func(context.Context) (types.IPAddressUsage, error)); ok { @@ -210,6 +222,10 @@ func (_c *IPOperatorClient_Stop_Call) RunAndReturn(run func()) *IPOperatorClient func (_m *IPOperatorClient) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() diff --git a/cluster/mocks/metallb_client.go b/cluster/mocks/metallb_client.go index 7c8c502a..a4c50ad0 100644 --- a/cluster/mocks/metallb_client.go +++ b/cluster/mocks/metallb_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -29,6 +29,10 @@ func (_m *MetalLBClient) EXPECT() *MetalLBClient_Expecter { func (_m *MetalLBClient) CreateIPPassthrough(ctx context.Context, directive v1beta3.ClusterIPPassthroughDirective) error { ret := _m.Called(ctx, directive) + if len(ret) == 0 { + panic("no return value specified for CreateIPPassthrough") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ClusterIPPassthroughDirective) error); ok { r0 = rf(ctx, directive) @@ -72,6 +76,10 @@ func (_c *MetalLBClient_CreateIPPassthrough_Call) RunAndReturn(run func(context. func (_m *MetalLBClient) DetectPoolChanges(ctx context.Context) (<-chan struct{}, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for DetectPoolChanges") + } + var r0 <-chan struct{} var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan struct{}, error)); ok { @@ -126,6 +134,10 @@ func (_c *MetalLBClient_DetectPoolChanges_Call) RunAndReturn(run func(context.Co func (_m *MetalLBClient) GetIPAddressStatusForLease(ctx context.Context, leaseID v1beta4.LeaseID) ([]v1beta3.IPLeaseState, error) { ret := _m.Called(ctx, leaseID) + if len(ret) == 0 { + panic("no return value specified for GetIPAddressStatusForLease") + } + var r0 []v1beta3.IPLeaseState var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) ([]v1beta3.IPLeaseState, error)); ok { @@ -181,6 +193,10 @@ func (_c *MetalLBClient_GetIPAddressStatusForLease_Call) RunAndReturn(run func(c func (_m *MetalLBClient) GetIPAddressUsage(ctx context.Context) (uint, uint, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetIPAddressUsage") + } + var r0 uint var r1 uint var r2 error @@ -240,6 +256,10 @@ func (_c *MetalLBClient_GetIPAddressUsage_Call) RunAndReturn(run func(context.Co func (_m *MetalLBClient) GetIPPassthroughs(ctx context.Context) ([]v1beta3.IPPassthrough, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetIPPassthroughs") + } + var r0 []v1beta3.IPPassthrough var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.IPPassthrough, error)); ok { @@ -294,6 +314,10 @@ func (_c *MetalLBClient_GetIPPassthroughs_Call) RunAndReturn(run func(context.Co func (_m *MetalLBClient) PurgeIPPassthrough(ctx context.Context, directive v1beta3.ClusterIPPassthroughDirective) error { ret := _m.Called(ctx, directive) + if len(ret) == 0 { + panic("no return value specified for PurgeIPPassthrough") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ClusterIPPassthroughDirective) error); ok { r0 = rf(ctx, directive) diff --git a/cluster/mocks/read_client.go b/cluster/mocks/read_client.go index 1e4ba272..e45aa26f 100644 --- a/cluster/mocks/read_client.go +++ b/cluster/mocks/read_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -30,6 +30,10 @@ func (_m *ReadClient) EXPECT() *ReadClient_Expecter { func (_m *ReadClient) AllHostnames(_a0 context.Context) ([]v1beta3.ActiveHostname, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for AllHostnames") + } + var r0 []v1beta3.ActiveHostname var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.ActiveHostname, error)); ok { @@ -84,6 +88,10 @@ func (_c *ReadClient_AllHostnames_Call) RunAndReturn(run func(context.Context) ( func (_m *ReadClient) ForwardedPortStatus(_a0 context.Context, _a1 v1beta4.LeaseID) (map[string][]v1beta3.ForwardedPortStatus, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ForwardedPortStatus") + } + var r0 map[string][]v1beta3.ForwardedPortStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) (map[string][]v1beta3.ForwardedPortStatus, error)); ok { @@ -139,6 +147,10 @@ func (_c *ReadClient_ForwardedPortStatus_Call) RunAndReturn(run func(context.Con func (_m *ReadClient) GetDeclaredIPs(ctx context.Context, leaseID v1beta4.LeaseID) ([]v2beta2.ProviderLeasedIPSpec, error) { ret := _m.Called(ctx, leaseID) + if len(ret) == 0 { + panic("no return value specified for GetDeclaredIPs") + } + var r0 []v2beta2.ProviderLeasedIPSpec var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) ([]v2beta2.ProviderLeasedIPSpec, error)); ok { @@ -194,6 +206,10 @@ func (_c *ReadClient_GetDeclaredIPs_Call) RunAndReturn(run func(context.Context, func (_m *ReadClient) GetHostnameDeploymentConnections(ctx context.Context) ([]v1beta3.LeaseIDHostnameConnection, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetHostnameDeploymentConnections") + } + var r0 []v1beta3.LeaseIDHostnameConnection var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]v1beta3.LeaseIDHostnameConnection, error)); ok { @@ -248,6 +264,10 @@ func (_c *ReadClient_GetHostnameDeploymentConnections_Call) RunAndReturn(run fun func (_m *ReadClient) GetManifestGroup(_a0 context.Context, _a1 v1beta4.LeaseID) (bool, v2beta2.ManifestGroup, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetManifestGroup") + } + var r0 bool var r1 v2beta2.ManifestGroup var r2 error @@ -308,6 +328,10 @@ func (_c *ReadClient_GetManifestGroup_Call) RunAndReturn(run func(context.Contex func (_m *ReadClient) LeaseEvents(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string, _a3 bool) (v1beta3.EventsWatcher, error) { ret := _m.Called(_a0, _a1, _a2, _a3) + if len(ret) == 0 { + panic("no return value specified for LeaseEvents") + } + var r0 v1beta3.EventsWatcher var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, bool) (v1beta3.EventsWatcher, error)); ok { @@ -365,6 +389,10 @@ func (_c *ReadClient_LeaseEvents_Call) RunAndReturn(run func(context.Context, v1 func (_m *ReadClient) LeaseLogs(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string, _a3 bool, _a4 *int64) ([]*v1beta3.ServiceLog, error) { ret := _m.Called(_a0, _a1, _a2, _a3, _a4) + if len(ret) == 0 { + panic("no return value specified for LeaseLogs") + } + var r0 []*v1beta3.ServiceLog var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, bool, *int64) ([]*v1beta3.ServiceLog, error)); ok { @@ -423,6 +451,10 @@ func (_c *ReadClient_LeaseLogs_Call) RunAndReturn(run func(context.Context, v1be func (_m *ReadClient) LeaseStatus(_a0 context.Context, _a1 
v1beta4.LeaseID) (map[string]*v1beta3.ServiceStatus, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LeaseStatus") + } + var r0 map[string]*v1beta3.ServiceStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID) (map[string]*v1beta3.ServiceStatus, error)); ok { @@ -478,6 +510,10 @@ func (_c *ReadClient_LeaseStatus_Call) RunAndReturn(run func(context.Context, v1 func (_m *ReadClient) ObserveHostnameState(ctx context.Context) (<-chan v1beta3.HostnameResourceEvent, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ObserveHostnameState") + } + var r0 <-chan v1beta3.HostnameResourceEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan v1beta3.HostnameResourceEvent, error)); ok { @@ -532,6 +568,10 @@ func (_c *ReadClient_ObserveHostnameState_Call) RunAndReturn(run func(context.Co func (_m *ReadClient) ObserveIPState(ctx context.Context) (<-chan v1beta3.IPResourceEvent, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ObserveIPState") + } + var r0 <-chan v1beta3.IPResourceEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan v1beta3.IPResourceEvent, error)); ok { @@ -586,6 +626,10 @@ func (_c *ReadClient_ObserveIPState_Call) RunAndReturn(run func(context.Context) func (_m *ReadClient) ServiceStatus(_a0 context.Context, _a1 v1beta4.LeaseID, _a2 string) (*v1beta3.ServiceStatus, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for ServiceStatus") + } + var r0 *v1beta3.ServiceStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string) (*v1beta3.ServiceStatus, error)); ok { diff --git a/cluster/mocks/reservation.go b/cluster/mocks/reservation.go index 6dd7149a..0fe0404e 100644 --- a/cluster/mocks/reservation.go +++ b/cluster/mocks/reservation.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -26,6 +26,10 @@ func (_m *Reservation) EXPECT() *Reservation_Expecter { func (_m *Reservation) Allocated() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Allocated") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -67,6 +71,10 @@ func (_c *Reservation_Allocated_Call) RunAndReturn(run func() bool) *Reservation func (_m *Reservation) ClusterParams() interface{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ClusterParams") + } + var r0 interface{} if rf, ok := ret.Get(0).(func() interface{}); ok { r0 = rf() @@ -110,6 +118,10 @@ func (_c *Reservation_ClusterParams_Call) RunAndReturn(run func() interface{}) * func (_m *Reservation) GetAllocatedResources() deploymentv1beta3.ResourceUnits { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetAllocatedResources") + } + var r0 deploymentv1beta3.ResourceUnits if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceUnits); ok { r0 = rf() @@ -153,6 +165,10 @@ func (_c *Reservation_GetAllocatedResources_Call) RunAndReturn(run func() deploy func (_m *Reservation) OrderID() v1beta4.OrderID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OrderID") + } + var r0 v1beta4.OrderID if rf, ok := ret.Get(0).(func() v1beta4.OrderID); ok { r0 = rf() @@ -194,6 +210,10 @@ func (_c *Reservation_OrderID_Call) RunAndReturn(run func() v1beta4.OrderID) *Re func (_m *Reservation) Resources() deploymentv1beta3.ResourceGroup { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Resources") + } + var r0 deploymentv1beta3.ResourceGroup if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceGroup); ok { r0 = rf() diff --git a/cluster/mocks/reservation_group.go b/cluster/mocks/reservation_group.go index 6735d759..cceb7326 100644 --- a/cluster/mocks/reservation_group.go +++ b/cluster/mocks/reservation_group.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. package mocks @@ -24,6 +24,10 @@ func (_m *ReservationGroup) EXPECT() *ReservationGroup_Expecter { func (_m *ReservationGroup) ClusterParams() interface{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ClusterParams") + } + var r0 interface{} if rf, ok := ret.Get(0).(func() interface{}); ok { r0 = rf() @@ -67,6 +71,10 @@ func (_c *ReservationGroup_ClusterParams_Call) RunAndReturn(run func() interface func (_m *ReservationGroup) GetAllocatedResources() deploymentv1beta3.ResourceUnits { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetAllocatedResources") + } + var r0 deploymentv1beta3.ResourceUnits if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceUnits); ok { r0 = rf() @@ -110,6 +118,10 @@ func (_c *ReservationGroup_GetAllocatedResources_Call) RunAndReturn(run func() d func (_m *ReservationGroup) Resources() deploymentv1beta3.ResourceGroup { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Resources") + } + var r0 deploymentv1beta3.ResourceGroup if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceGroup); ok { r0 = rf() diff --git a/cluster/mocks/service.go b/cluster/mocks/service.go index 6abc903e..c6e759fe 100644 --- a/cluster/mocks/service.go +++ b/cluster/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -10,6 +10,8 @@ import ( types "github.com/cosmos/cosmos-sdk/types" + v1 "github.com/akash-network/akash-api/go/provider/v1" + v1beta3 "github.com/akash-network/provider/cluster/types/v1beta3" v1beta4 "github.com/akash-network/akash-api/go/node/market/v1beta4" @@ -34,6 +36,10 @@ func (_m *Service) EXPECT() *Service_Expecter { func (_m *Service) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -75,6 +81,10 @@ func (_c *Service_Close_Call) RunAndReturn(run func() error) *Service_Close_Call func (_m *Service) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -118,6 +128,10 @@ func (_c *Service_Done_Call) RunAndReturn(run func() <-chan struct{}) *Service_D func (_m *Service) FindActiveLease(ctx context.Context, owner types.Address, dseq uint64, gseq uint32) (bool, v1beta4.LeaseID, v2beta2.ManifestGroup, error) { ret := _m.Called(ctx, owner, dseq, gseq) + if len(ret) == 0 { + panic("no return value specified for FindActiveLease") + } + var r0 bool var r1 v1beta4.LeaseID var r2 v2beta2.ManifestGroup @@ -187,6 +201,10 @@ func (_c *Service_FindActiveLease_Call) RunAndReturn(run func(context.Context, t func (_m *Service) HostnameService() v1beta3.HostnameServiceClient { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for HostnameService") + } + var r0 v1beta3.HostnameServiceClient if rf, ok := ret.Get(0).(func() v1beta3.HostnameServiceClient); ok { r0 = rf() @@ -230,6 +248,10 @@ func (_c *Service_HostnameService_Call) RunAndReturn(run func() v1beta3.Hostname func (_m *Service) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -273,6 +295,10 @@ func (_c *Service_Ready_Call) RunAndReturn(run func() <-chan struct{}) *Service_ func (_m *Service) Reserve(_a0 v1beta4.OrderID, _a1 deploymentv1beta3.ResourceGroup) (v1beta3.Reservation, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Reserve") + } + var r0 v1beta3.Reservation var r1 error if rf, ok := ret.Get(0).(func(v1beta4.OrderID, deploymentv1beta3.ResourceGroup) (v1beta3.Reservation, error)); ok { @@ -328,6 +354,10 @@ func (_c *Service_Reserve_Call) RunAndReturn(run func(v1beta4.OrderID, deploymen func (_m *Service) Status(_a0 context.Context) (*v1beta3.Status, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *v1beta3.Status var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*v1beta3.Status, error)); ok { @@ -378,10 +408,72 @@ func (_c *Service_Status_Call) RunAndReturn(run func(context.Context) (*v1beta3. 
return _c } +// StatusV1 provides a mock function with given fields: _a0 +func (_m *Service) StatusV1(_a0 context.Context) (*v1.ClusterStatus, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for StatusV1") + } + + var r0 *v1.ClusterStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*v1.ClusterStatus, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *v1.ClusterStatus); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.ClusterStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Service_StatusV1_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StatusV1' +type Service_StatusV1_Call struct { + *mock.Call +} + +// StatusV1 is a helper method to define mock.On call +// - _a0 context.Context +func (_e *Service_Expecter) StatusV1(_a0 interface{}) *Service_StatusV1_Call { + return &Service_StatusV1_Call{Call: _e.mock.On("StatusV1", _a0)} +} + +func (_c *Service_StatusV1_Call) Run(run func(_a0 context.Context)) *Service_StatusV1_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Service_StatusV1_Call) Return(_a0 *v1.ClusterStatus, _a1 error) *Service_StatusV1_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Service_StatusV1_Call) RunAndReturn(run func(context.Context) (*v1.ClusterStatus, error)) *Service_StatusV1_Call { + _c.Call.Return(run) + return _c +} + // TransferHostname provides a mock function with given fields: ctx, leaseID, hostname, serviceName, externalPort func (_m *Service) TransferHostname(ctx context.Context, leaseID v1beta4.LeaseID, hostname string, serviceName string, externalPort uint32) error { ret := _m.Called(ctx, leaseID, hostname, serviceName, externalPort) + if len(ret) == 0 { + panic("no return value specified for TransferHostname") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta4.LeaseID, string, string, uint32) error); ok { r0 = rf(ctx, leaseID, hostname, serviceName, externalPort) @@ -428,6 +520,10 @@ func (_c *Service_TransferHostname_Call) RunAndReturn(run func(context.Context, func (_m *Service) Unreserve(_a0 v1beta4.OrderID) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Unreserve") + } + var r0 error if rf, ok := ret.Get(0).(func(v1beta4.OrderID) error); ok { r0 = rf(_a0) diff --git a/cluster/service.go b/cluster/service.go index 25fa3627..edb65031 100644 --- a/cluster/service.go +++ b/cluster/service.go @@ -4,8 +4,10 @@ import ( "context" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + provider "github.com/akash-network/akash-api/go/provider/v1" "github.com/boz/go-lifecycle" sdktypes "github.com/cosmos/cosmos-sdk/types" + tpubsub "github.com/troian/pubsub" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -22,12 +24,15 @@ import ( "github.com/akash-network/provider/operator/waiter" crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" "github.com/akash-network/provider/session" + "github.com/akash-network/provider/tools/fromctx" + ptypes "github.com/akash-network/provider/types" ) // ErrNotRunning is the error when service is not running var ( ErrNotRunning = errors.New("not running") ErrInvalidResource = errors.New("invalid resource") + errNoManifestGroup = errors.New("no manifest 
group could be found") ) var ( @@ -39,6 +44,38 @@ var ( }) ) +type service struct { + session session.Session + client Client + bus pubsub.Bus + sub pubsub.Subscriber + + inventory *inventoryService + hostnames *hostnameService + + checkDeploymentExistsRequestCh chan checkDeploymentExistsRequest + statusch chan chan<- *ctypes.Status + statusV1ch chan chan<- uint32 + managers map[mtypes.LeaseID]*deploymentManager + + managerch chan *deploymentManager + + log log.Logger + lc lifecycle.Lifecycle + + waiter waiter.OperatorWaiter + + config Config +} + +type checkDeploymentExistsRequest struct { + owner sdktypes.Address + dseq uint64 + gseq uint32 + + responseCh chan<- mtypes.LeaseID +} + // Cluster is the interface that wraps Reserve and Unreserve methods // //go:generate mockery --name Cluster @@ -50,6 +87,7 @@ type Cluster interface { // StatusClient is the interface which includes status of service type StatusClient interface { Status(context.Context) (*ctypes.Status, error) + StatusV1(context.Context) (*provider.ClusterStatus, error) FindActiveLease(ctx context.Context, owner sdktypes.Address, dseq uint64, gseq uint32) (bool, mtypes.LeaseID, crd.ManifestGroup, error) } @@ -83,7 +121,7 @@ func NewService(ctx context.Context, session session.Session, bus pubsub.Bus, cl return nil, err } - inventory, err := newInventoryService(cfg, log, lc.ShuttingDown(), sub, client, ipOperatorClient, waiter, deployments) + inventory, err := newInventoryService(ctx, cfg, log, sub, client, ipOperatorClient, waiter, deployments) if err != nil { sub.Close() return nil, err @@ -115,6 +153,7 @@ func NewService(ctx context.Context, session session.Session, bus pubsub.Bus, cl sub: sub, inventory: inventory, statusch: make(chan chan<- *ctypes.Status), + statusV1ch: make(chan chan<- uint32), managers: make(map[mtypes.LeaseID]*deploymentManager), managerch: make(chan *deploymentManager), checkDeploymentExistsRequestCh: make(chan checkDeploymentExistsRequest), @@ -131,39 +170,6 @@ func NewService(ctx context.Context, session session.Session, bus pubsub.Bus, cl return s, nil } -type service struct { - session session.Session - client Client - bus pubsub.Bus - sub pubsub.Subscriber - - inventory *inventoryService - hostnames *hostnameService - - checkDeploymentExistsRequestCh chan checkDeploymentExistsRequest - statusch chan chan<- *ctypes.Status - managers map[mtypes.LeaseID]*deploymentManager - - managerch chan *deploymentManager - - log log.Logger - lc lifecycle.Lifecycle - - waiter waiter.OperatorWaiter - - config Config -} - -type checkDeploymentExistsRequest struct { - owner sdktypes.Address - dseq uint64 - gseq uint32 - - responseCh chan<- mtypes.LeaseID -} - -var errNoManifestGroup = errors.New("no manifest group could be found") - func (s *service) FindActiveLease(ctx context.Context, owner sdktypes.Address, dseq uint64, gseq uint32) (bool, mtypes.LeaseID, crd.ManifestGroup, error) { response := make(chan mtypes.LeaseID, 1) req := checkDeploymentExistsRequest{ @@ -258,6 +264,39 @@ func (s *service) Status(ctx context.Context) (*ctypes.Status, error) { } } +func (s *service) StatusV1(ctx context.Context) (*provider.ClusterStatus, error) { + istatus, err := s.inventory.statusV1(ctx) + if err != nil { + return nil, err + } + + ch := make(chan uint32, 1) + + select { + case <-s.lc.Done(): + return nil, ErrNotRunning + case <-ctx.Done(): + return nil, ctx.Err() + case s.statusV1ch <- ch: + } + + select { + case <-s.lc.Done(): + return nil, ErrNotRunning + case <-ctx.Done(): + return nil, ctx.Err() + case result := 
<-ch: + res := &provider.ClusterStatus{ + Leases: provider.Leases{ + Active: result, + }, + Inventory: *istatus, + } + + return res, nil + } +} + func (s *service) updateDeploymentManagerGauge() { deploymentManagerGauge.Set(float64(len(s.managers))) } @@ -275,11 +314,29 @@ func (s *service) run(ctx context.Context, deployments []ctypes.IDeployment) { return } + bus := fromctx.PubSubFromCtx(ctx) + + inventorych := bus.Sub(ptypes.PubSubTopicInventoryStatus) + for _, deployment := range deployments { s.managers[deployment.LeaseID()] = newDeploymentManager(s, deployment, false) s.updateDeploymentManagerGauge() } + signalch := make(chan struct{}, 1) + + trySignal := func() { + select { + case signalch <- struct{}{}: + case <-ctx.Done(): + default: + } + } + + // var inv provider.Inventory + + trySignal() + loop: for { select { @@ -319,6 +376,8 @@ loop: } s.managers[key] = newDeploymentManager(s, deployment, true) + + trySignal() case mtypes.EventLeaseClosed: _ = s.bus.Publish(event.LeaseRemoveFundsMonitor{LeaseID: ev.ID}) s.teardownLease(ev.ID) @@ -327,6 +386,24 @@ loop: ch <- &ctypes.Status{ Leases: uint32(len(s.managers)), } + case ch := <-s.statusV1ch: + ch <- uint32(len(s.managers)) + case <-signalch: + istatus, _ := s.inventory.statusV1(ctx) + + if istatus == nil { + continue + } + + msg := provider.ClusterStatus{ + Leases: provider.Leases{Active: uint32(len(s.managers))}, + Inventory: *istatus, + } + bus.Pub(msg, []string{ptypes.PubSubTopicClusterStatus}, tpubsub.WithRetain()) + case _ = <-inventorych: + // inv = val.(provider.Inventory) + + trySignal() case dm := <-s.managerch: s.log.Info("manager done", "lease", dm.deployment.LeaseID()) @@ -338,6 +415,7 @@ loop: } delete(s.managers, dm.deployment.LeaseID()) + trySignal() case req := <-s.checkDeploymentExistsRequestCh: s.doCheckDeploymentExists(req) } diff --git a/cluster/types/v1beta3/types.go b/cluster/types/v1beta3/types.go index e41f2fff..fa95cfbe 100644 --- a/cluster/types/v1beta3/types.go +++ b/cluster/types/v1beta3/types.go @@ -7,6 +7,7 @@ import ( "io" "strings" + inventoryV1 "github.com/akash-network/akash-api/go/inventory/v1" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pkg/errors" @@ -16,6 +17,7 @@ import ( manifest "github.com/akash-network/akash-api/go/manifest/v2beta2" types "github.com/akash-network/akash-api/go/node/types/v1beta3" + "github.com/akash-network/akash-api/go/util/units" ) var ( @@ -61,20 +63,38 @@ type InventoryNodeMetric struct { StorageEphemeral uint64 `json:"storage_ephemeral"` } -type GPUAttributes map[string][]string +type GPUModelAttributes struct { + RAM string + Interface string +} + +type GPUModels map[string]*GPUModelAttributes + +type GPUAttributes map[string]GPUModels type StorageAttributes struct { Persistent bool `json:"persistent"` Class string `json:"class,omitempty"` } +func (m GPUModels) ExistsOrWildcard(model string) (*GPUModelAttributes, bool) { + attr, exists := m[model] + if exists { + return attr, true + } + + attr, exists = m["*"] + + return attr, exists +} + func ParseGPUAttributes(attrs types.Attributes) (GPUAttributes, error) { - var nvidia []string - var amd []string + nvidia := make(GPUModels) + amd := make(GPUModels) for _, attr := range attrs { tokens := strings.Split(attr.Key, "/") - if len(tokens) != 4 { + if len(tokens) < 4 || len(tokens)%2 != 0 { return GPUAttributes{}, fmt.Errorf("invalid GPU attribute") // nolint: goerr113 } @@ -84,13 +104,55 @@ func ParseGPUAttributes(attrs types.Attributes) 
(GPUAttributes, error) { return GPUAttributes{}, fmt.Errorf("unexpected GPU attribute type (%s)", tokens[0]) // nolint: goerr113 } - switch tokens[1] { + switch tokens[2] { + case "model": + default: + return GPUAttributes{}, fmt.Errorf("unexpected GPU attribute type (%s)", tokens[2]) // nolint: goerr113 + } + + vendor := tokens[1] + model := tokens[3] + + var mattrs *GPUModelAttributes + + if len(tokens) > 4 { + mattrs = &GPUModelAttributes{} + + tokens = tokens[4:] + + for i := 0; i < len(tokens); i += 2 { + key := tokens[i] + val := tokens[i+1] + + switch key { + case "ram": + q, err := units.MemoryQuantityFromString(val) + if err != nil { + return GPUAttributes{}, err + } + + mattrs.RAM = q.StringWithSuffix("Gi") + case "interface": + switch val { + case "pcie": + case "sxm": + default: + return GPUAttributes{}, fmt.Errorf("unsupported GPU interface (%s)", val) // nolint: goerr113 + } + + mattrs.Interface = val + default: + } + } + } + + switch vendor { case "nvidia": - nvidia = append(nvidia, tokens[3]) + nvidia[model] = mattrs case "amd": - amd = append(amd, tokens[3]) + amd[model] = mattrs default: - return GPUAttributes{}, fmt.Errorf("unsupported GPU vendor (%s)", tokens[1]) // nolint: goerr113 + return GPUAttributes{}, fmt.Errorf("unsupported GPU vendor (%s)", vendor) // nolint: goerr113 } } @@ -215,6 +277,7 @@ func WithDryRun() InventoryOption { type Inventory interface { Adjust(ReservationGroup, ...InventoryOption) error Metrics() InventoryMetrics + Snapshot() inventoryV1.Cluster } // ServiceLog stores name, stream and scanner diff --git a/cmd/provider-services/cmd/flags/kube_config.go b/cmd/provider-services/cmd/flags/kube_config.go index e3304855..cf3e48bf 100644 --- a/cmd/provider-services/cmd/flags/kube_config.go +++ b/cmd/provider-services/cmd/flags/kube_config.go @@ -10,6 +10,6 @@ const ( ) func AddKubeConfigPathFlag(cmd *cobra.Command) error { - cmd.Flags().String(FlagKubeConfig, "$HOME/.kube/config", "kubernetes configuration file path") - return viper.BindPFlag(FlagKubeConfig, cmd.Flags().Lookup(FlagKubeConfig)) + cmd.PersistentFlags().String(FlagKubeConfig, "$HOME/.kube/config", "kubernetes configuration file path") + return viper.BindPFlag(FlagKubeConfig, cmd.PersistentFlags().Lookup(FlagKubeConfig)) } diff --git a/cmd/provider-services/cmd/root.go b/cmd/provider-services/cmd/root.go index ecd3729b..23df1192 100644 --- a/cmd/provider-services/cmd/root.go +++ b/cmd/provider-services/cmd/root.go @@ -25,7 +25,6 @@ import ( ) func NewRootCmd() *cobra.Command { - encodingConfig := app.MakeEncodingConfig() cmd := &cobra.Command{ diff --git a/cmd/provider-services/cmd/run.go b/cmd/provider-services/cmd/run.go index 04942522..e24c8498 100644 --- a/cmd/provider-services/cmd/run.go +++ b/cmd/provider-services/cmd/run.go @@ -16,6 +16,7 @@ import ( "github.com/shopspring/decimal" "github.com/spf13/cobra" "github.com/spf13/viper" + tpubsub "github.com/troian/pubsub" "golang.org/x/sync/errgroup" "github.com/tendermint/tendermint/libs/log" @@ -43,9 +44,11 @@ import ( "github.com/akash-network/provider/cluster/operatorclients" providerflags "github.com/akash-network/provider/cmd/provider-services/cmd/flags" cmdutil "github.com/akash-network/provider/cmd/provider-services/cmd/util" + gwgrpc "github.com/akash-network/provider/gateway/grpc" gwrest "github.com/akash-network/provider/gateway/rest" "github.com/akash-network/provider/operator/waiter" "github.com/akash-network/provider/session" + "github.com/akash-network/provider/tools/fromctx" ) const ( @@ -120,6 +123,30 @@ func RunCmd() 
*cobra.Command { return errors.Errorf(`flag "%s" value must be > "%s"`, FlagWithdrawalPeriod, FlagLeaseFundsMonitorInterval) // nolint: goerr113 } + group, ctx := errgroup.WithContext(cmd.Context()) + cmd.SetContext(ctx) + + startupch := make(chan struct{}, 1) + + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyStartupCh, (chan<- struct{})(startupch)) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyErrGroup, group) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyLogc, cmdutil.OpenLogger().With("cmp", "provider")) + + pctx, pcancel := context.WithCancel(context.Background()) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyPubSub, tpubsub.New(pctx, 1000)) + + go func() { + defer pcancel() + + select { + case <-ctx.Done(): + return + case <-startupch: + } + + _ = group.Wait() + }() + return nil }, RunE: func(cmd *cobra.Command, args []string) error { @@ -131,7 +158,7 @@ func RunCmd() *cobra.Command { cmd.Flags().String(flags.FlagChainID, "", "The network chain ID") if err := viper.BindPFlag(flags.FlagChainID, cmd.Flags().Lookup(flags.FlagChainID)); err != nil { - return nil + panic(err) } flags.AddTxFlagsToCmd(cmd) @@ -140,211 +167,211 @@ func RunCmd() *cobra.Command { cmd.Flags().Bool(FlagClusterK8s, false, "Use Kubernetes cluster") if err := viper.BindPFlag(FlagClusterK8s, cmd.Flags().Lookup(FlagClusterK8s)); err != nil { - return nil + panic(err) } cmd.Flags().String(providerflags.FlagK8sManifestNS, "lease", "Cluster manifest namespace") if err := viper.BindPFlag(providerflags.FlagK8sManifestNS, cmd.Flags().Lookup(providerflags.FlagK8sManifestNS)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagGatewayListenAddress, "0.0.0.0:8443", "Gateway listen address") if err := viper.BindPFlag(FlagGatewayListenAddress, cmd.Flags().Lookup(FlagGatewayListenAddress)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPricingStrategy, "scale", "Pricing strategy to use") if err := viper.BindPFlag(FlagBidPricingStrategy, cmd.Flags().Lookup(FlagBidPricingStrategy)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceCPUScale, "0", "cpu pricing scale in uakt per millicpu") if err := viper.BindPFlag(FlagBidPriceCPUScale, cmd.Flags().Lookup(FlagBidPriceCPUScale)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceMemoryScale, "0", "memory pricing scale in uakt per megabyte") if err := viper.BindPFlag(FlagBidPriceMemoryScale, cmd.Flags().Lookup(FlagBidPriceMemoryScale)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceStorageScale, "0", "storage pricing scale in uakt per megabyte") if err := viper.BindPFlag(FlagBidPriceStorageScale, cmd.Flags().Lookup(FlagBidPriceStorageScale)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceEndpointScale, "0", "endpoint pricing scale in uakt") if err := viper.BindPFlag(FlagBidPriceEndpointScale, cmd.Flags().Lookup(FlagBidPriceEndpointScale)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceIPScale, "0", "leased ip pricing scale in uakt") if err := viper.BindPFlag(FlagBidPriceIPScale, cmd.Flags().Lookup(FlagBidPriceIPScale)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidPriceScriptPath, "", "path to script to run for computing bid price") if err := viper.BindPFlag(FlagBidPriceScriptPath, cmd.Flags().Lookup(FlagBidPriceScriptPath)); err != nil { - return nil + panic(err) } cmd.Flags().Uint(FlagBidPriceScriptProcessLimit, 32, "limit to the number of scripts run concurrently for bid pricing") if err := 
viper.BindPFlag(FlagBidPriceScriptProcessLimit, cmd.Flags().Lookup(FlagBidPriceScriptProcessLimit)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagBidPriceScriptTimeout, time.Second*10, "execution timelimit for bid pricing as a duration") if err := viper.BindPFlag(FlagBidPriceScriptTimeout, cmd.Flags().Lookup(FlagBidPriceScriptTimeout)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagBidDeposit, cfg.BidDeposit.String(), "Bid deposit amount") if err := viper.BindPFlag(FlagBidDeposit, cmd.Flags().Lookup(FlagBidDeposit)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagClusterPublicHostname, "", "The public IP of the Kubernetes cluster") if err := viper.BindPFlag(FlagClusterPublicHostname, cmd.Flags().Lookup(FlagClusterPublicHostname)); err != nil { - return nil + panic(err) } cmd.Flags().Uint(FlagClusterNodePortQuantity, 1, "The number of node ports available on the Kubernetes cluster") if err := viper.BindPFlag(FlagClusterNodePortQuantity, cmd.Flags().Lookup(FlagClusterNodePortQuantity)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagClusterWaitReadyDuration, time.Second*5, "The time to wait for the cluster to be available") if err := viper.BindPFlag(FlagClusterWaitReadyDuration, cmd.Flags().Lookup(FlagClusterWaitReadyDuration)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagInventoryResourcePollPeriod, time.Second*5, "The period to poll the cluster inventory") if err := viper.BindPFlag(FlagInventoryResourcePollPeriod, cmd.Flags().Lookup(FlagInventoryResourcePollPeriod)); err != nil { - return nil + panic(err) } cmd.Flags().Uint(FlagInventoryResourceDebugFrequency, 10, "The rate at which to log all inventory resources") if err := viper.BindPFlag(FlagInventoryResourceDebugFrequency, cmd.Flags().Lookup(FlagInventoryResourceDebugFrequency)); err != nil { - return nil + panic(err) } cmd.Flags().Bool(FlagDeploymentIngressStaticHosts, false, "") if err := viper.BindPFlag(FlagDeploymentIngressStaticHosts, cmd.Flags().Lookup(FlagDeploymentIngressStaticHosts)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagDeploymentIngressDomain, "", "") if err := viper.BindPFlag(FlagDeploymentIngressDomain, cmd.Flags().Lookup(FlagDeploymentIngressDomain)); err != nil { - return nil + panic(err) } cmd.Flags().Bool(FlagDeploymentIngressExposeLBHosts, false, "") if err := viper.BindPFlag(FlagDeploymentIngressExposeLBHosts, cmd.Flags().Lookup(FlagDeploymentIngressExposeLBHosts)); err != nil { - return nil + panic(err) } cmd.Flags().Bool(FlagDeploymentNetworkPoliciesEnabled, true, "Enable network policies") if err := viper.BindPFlag(FlagDeploymentNetworkPoliciesEnabled, cmd.Flags().Lookup(FlagDeploymentNetworkPoliciesEnabled)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagDockerImagePullSecretsName, "", "Name of the local image pull secret configured with kubectl") if err := viper.BindPFlag(FlagDockerImagePullSecretsName, cmd.Flags().Lookup(FlagDockerImagePullSecretsName)); err != nil { - return nil + panic(err) } cmd.Flags().Uint64(FlagOvercommitPercentMemory, 0, "Percentage of memory overcommit") if err := viper.BindPFlag(FlagOvercommitPercentMemory, cmd.Flags().Lookup(FlagOvercommitPercentMemory)); err != nil { - return nil + panic(err) } cmd.Flags().Uint64(FlagOvercommitPercentCPU, 0, "Percentage of CPU overcommit") if err := viper.BindPFlag(FlagOvercommitPercentCPU, cmd.Flags().Lookup(FlagOvercommitPercentCPU)); err != nil { - return nil + panic(err) } 
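Each `viper.BindPFlag` call in `RunCmd` previously returned `nil` on a binding error, silently handing the caller a nil `*cobra.Command`; this patch switches every one of them to `panic(err)` so misconfiguration fails loudly at startup, as the bindings above and below show. The repetition suggests a small helper; a hypothetical sketch, not part of this patch:

```go
package cmd

import (
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

// mustBindPFlag is a hypothetical convenience wrapper (not in this diff):
// it binds a pflag to viper and panics on failure, matching the behavior
// the patch introduces for every flag registered in RunCmd.
func mustBindPFlag(key string, flag *pflag.Flag) {
	if err := viper.BindPFlag(key, flag); err != nil {
		panic(err)
	}
}

// Usage inside RunCmd would then collapse each pair of statements to:
//	cmd.Flags().Duration(FlagBidTimeout, 5*time.Minute, "time after which bids are cancelled if no lease is created")
//	mustBindPFlag(FlagBidTimeout, cmd.Flags().Lookup(FlagBidTimeout))
```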
cmd.Flags().Uint64(FlagOvercommitPercentStorage, 0, "Percentage of storage overcommit") if err := viper.BindPFlag(FlagOvercommitPercentStorage, cmd.Flags().Lookup(FlagOvercommitPercentStorage)); err != nil { - return nil + panic(err) } cmd.Flags().StringSlice(FlagDeploymentBlockedHostnames, nil, "hostnames blocked for deployments") if err := viper.BindPFlag(FlagDeploymentBlockedHostnames, cmd.Flags().Lookup(FlagDeploymentBlockedHostnames)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagAuthPem, "", "") if err := providerflags.AddKubeConfigPathFlag(cmd); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagDeploymentRuntimeClass, "gvisor", "kubernetes runtime class for deployments, use none for no specification") if err := viper.BindPFlag(FlagDeploymentRuntimeClass, cmd.Flags().Lookup(FlagDeploymentRuntimeClass)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagBidTimeout, 5*time.Minute, "time after which bids are cancelled if no lease is created") if err := viper.BindPFlag(FlagBidTimeout, cmd.Flags().Lookup(FlagBidTimeout)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagManifestTimeout, 5*time.Minute, "time after which bids are cancelled if no manifest is received") if err := viper.BindPFlag(FlagManifestTimeout, cmd.Flags().Lookup(FlagManifestTimeout)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagMetricsListener, "", "ip and port to start the metrics listener on") if err := viper.BindPFlag(FlagMetricsListener, cmd.Flags().Lookup(FlagMetricsListener)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagWithdrawalPeriod, time.Hour*24, "period at which withdrawals are made from the escrow accounts") if err := viper.BindPFlag(FlagWithdrawalPeriod, cmd.Flags().Lookup(FlagWithdrawalPeriod)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagLeaseFundsMonitorInterval, time.Minute*10, "interval at which lease is checked for funds available on the escrow accounts. >= 1m") if err := viper.BindPFlag(FlagLeaseFundsMonitorInterval, cmd.Flags().Lookup(FlagLeaseFundsMonitorInterval)); err != nil { - return nil + panic(err) } cmd.Flags().Uint64(FlagMinimumBalance, mparams.DefaultBidMinDeposit.Amount.Mul(sdk.NewIntFromUint64(2)).Uint64(), "minimum account balance at which withdrawal is started") if err := viper.BindPFlag(FlagMinimumBalance, cmd.Flags().Lookup(FlagMinimumBalance)); err != nil { - return nil + panic(err) } cmd.Flags().String(FlagProviderConfig, "", "provider configuration file path") if err := viper.BindPFlag(FlagProviderConfig, cmd.Flags().Lookup(FlagProviderConfig)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagRPCQueryTimeout, time.Minute, "timeout for requests made to the RPC node") if err := viper.BindPFlag(FlagRPCQueryTimeout, cmd.Flags().Lookup(FlagRPCQueryTimeout)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagCachedResultMaxAge, 5*time.Second, "max. cache age for results from the RPC node") if err := viper.BindPFlag(FlagCachedResultMaxAge, cmd.Flags().Lookup(FlagCachedResultMaxAge)); err != nil { - return nil + panic(err) } cmd.Flags().Bool(FlagEnableIPOperator, false, "enable usage of the IP operator to lease IP addresses") if err := viper.BindPFlag(FlagEnableIPOperator, cmd.Flags().Lookup(FlagEnableIPOperator)); err != nil { - return nil + panic(err) } cmd.Flags().Duration(FlagTxBroadcastTimeout, 30*time.Second, "tx broadcast timeout. 
defaults to 30s") if err := viper.BindPFlag(FlagTxBroadcastTimeout, cmd.Flags().Lookup(FlagTxBroadcastTimeout)); err != nil { - return nil + panic(err) } if err := providerflags.AddServiceEndpointFlag(cmd, serviceHostnameOperator); err != nil { - return nil + panic(err) } if err := providerflags.AddServiceEndpointFlag(cmd, serviceIPOperator); err != nil { - return nil + panic(err) } return cmd @@ -450,7 +477,6 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { dockerImagePullSecretsName := viper.GetString(FlagDockerImagePullSecretsName) strategy := viper.GetString(FlagBidPricingStrategy) deploymentIngressExposeLBHosts := viper.GetBool(FlagDeploymentIngressExposeLBHosts) - // from := viper.GetString(flags.FlagFrom) overcommitPercentStorage := 1.0 + float64(viper.GetUint64(FlagOvercommitPercentStorage)/100.0) overcommitPercentCPU := 1.0 + float64(viper.GetUint64(FlagOvercommitPercentCPU)/100.0) // no GPU overcommit @@ -466,14 +492,14 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { cachedResultMaxAge := viper.GetDuration(FlagCachedResultMaxAge) rpcQueryTimeout := viper.GetDuration(FlagRPCQueryTimeout) enableIPOperator := viper.GetBool(FlagEnableIPOperator) - // txTimeout := viper.GetDuration(FlagTxBroadcastTimeout) pricing, err := createBidPricingStrategy(strategy) if err != nil { return err } - logger := cmdutil.OpenLogger().With("cmp", "provider") + logger := fromctx.LogcFromCtx(cmd.Context()) + kubeConfig, err := clientcommon.OpenKubeConfig(kubeConfigPath, logger) if err != nil { return err @@ -484,24 +510,18 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { metricsRouter = makeMetricsRouter() } - clGroup, clCtx := errgroup.WithContext(ctx) + group := fromctx.ErrGroupFromCtx(ctx) cctx, err := sdkclient.GetClientTxContext(cmd) if err != nil { return err } - cl, err := client.DiscoverClient(clCtx, cctx, cmd.Flags()) + cl, err := client.DiscoverClient(ctx, cctx, cmd.Flags()) if err != nil { return err } - // keyname := cctx.GetFromName() - // info, err := txFactory.Keybase().Key(keyname) - // if err != nil { - // return err - // } - gwaddr := viper.GetString(FlagGatewayListenAddress) var certFromFlag io.Reader @@ -586,7 +606,7 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { bus := pubsub.NewBus() defer bus.Close() - group, ctx := errgroup.WithContext(clCtx) + // group, ctx := errgroup.WithContext(clCtx) // Provider service creation config := provider.NewDefaultConfig() @@ -667,7 +687,9 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { return err } - gateway, err := gwrest.NewServer( + ctx = context.WithValue(ctx, fromctx.CtxKeyErrGroup, group) + + gwRest, err := gwrest.NewServer( ctx, logger, service, @@ -682,9 +704,14 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { return err } - clGroup.Go(func() error { - return group.Wait() - }) + err = gwgrpc.NewServer(ctx, ":8081", []tls.Certificate{tlsCert}, service) + if err != nil { + return err + } + + // clGroup.Go(func() error { + // return group.Wait() + // }) group.Go(func() error { return events.Publish(ctx, cctx.Client, "provider-cli", bus) @@ -697,12 +724,12 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error { group.Go(func() error { // certificates are supplied via tls.Config - return gateway.ListenAndServeTLS("", "") + return gwRest.ListenAndServeTLS("", "") }) group.Go(func() error { <-ctx.Done() - return gateway.Close() + return gwRest.Close() 
})

	if metricsRouter != nil {
@@ -722,7 +749,9 @@ func doRunCmd(ctx context.Context, cmd *cobra.Command, _ []string) error {
		})
	}

-	err = clGroup.Wait()
+	fromctx.StartupChFromCtx(ctx) <- struct{}{}
+
+	err = group.Wait()

	if ipOperatorClient != nil {
		ipOperatorClient.Stop()
diff --git a/cmd/provider-services/main.go b/cmd/provider-services/main.go
index 66254bf1..44fb1594 100644
--- a/cmd/provider-services/main.go
+++ b/cmd/provider-services/main.go
@@ -1,7 +1,10 @@
 package main

 import (
+	"context"
+	"errors"
 	"os"
+	"os/signal"

 	"github.com/cosmos/cosmos-sdk/server"

@@ -10,16 +13,22 @@ import (
 	pcmd "github.com/akash-network/provider/cmd/provider-services/cmd"
 )

-// In main we call the rootCmd
-func main() {
+func run() error {
+	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
+	defer stop()
+
 	rootCmd := pcmd.NewRootCmd()

-	if err := acmd.Execute(rootCmd, "AP"); err != nil {
-		switch e := err.(type) {
-		case server.ErrorCode:
-			os.Exit(e.Code)
-		default:
-			os.Exit(1)
+	return acmd.ExecuteWithCtx(ctx, rootCmd, "AP")
+}
+
+func main() {
+	if err := run(); err != nil {
+		var ec server.ErrorCode
+		if errors.As(err, &ec) {
+			os.Exit(ec.Code)
 		}
+
+		os.Exit(1)
 	}
 }
diff --git a/gateway/grpc/server.go b/gateway/grpc/server.go
new file mode 100644
index 00000000..eabe87a6
--- /dev/null
+++ b/gateway/grpc/server.go
@@ -0,0 +1,205 @@
+package grpc
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/reflection"
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3"
+	providerv1 "github.com/akash-network/akash-api/go/provider/v1"
+
+	"github.com/akash-network/provider"
+	"github.com/akash-network/provider/tools/fromctx"
+	ptypes "github.com/akash-network/provider/types"
+)
+
+type ContextKey string
+
+const (
+	ContextKeyQueryClient = ContextKey("query-client")
+	ContextKeyOwner       = ContextKey("owner")
+)
+
+type grpcProviderV1 struct {
+	providerv1.ProviderRPCServer
+	ctx    context.Context
+	client provider.StatusClient
+}
+
+func QueryClientFromCtx(ctx context.Context) ctypes.QueryClient {
+	val := ctx.Value(ContextKeyQueryClient)
+	if val == nil {
+		panic("context does not have query client set")
+	}
+
+	return val.(ctypes.QueryClient)
+}
+
+func ContextWithOwner(ctx context.Context, address sdk.Address) context.Context {
+	return context.WithValue(ctx, ContextKeyOwner, address)
+}
+
+func OwnerFromCtx(ctx context.Context) sdk.Address {
+	val := ctx.Value(ContextKeyOwner)
+	if val == nil {
+		return sdk.AccAddress{}
+	}
+
+	return val.(sdk.Address)
+}
+
+func NewServer(ctx context.Context, endpoint string, certs []tls.Certificate, client provider.StatusClient) error {
+	// InsecureSkipVerify is set to true because standard TLS verification cannot
+	// be used here; certificate validation and authentication are performed
+	// later, in mtlsInterceptor
+	tlsConfig := &tls.Config{
+		Certificates:       certs,
+		ClientAuth:         tls.RequestClientCert,
+		InsecureSkipVerify: true, // nolint: gosec
+		MinVersion:         tls.VersionTLS13,
+	}
+
+	group := fromctx.ErrGroupFromCtx(ctx)
+	log := fromctx.LogcFromCtx(ctx)
+
+	grpcSrv := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig)), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+		MinTime:             30 * time.Second,
+		PermitWithoutStream: false,
+ }), grpc.ChainUnaryInterceptor(mtlsInterceptor())) + + providerv1.RegisterProviderRPCServer(grpcSrv, &grpcProviderV1{ + ctx: ctx, + client: client, + }) + + reflection.Register(grpcSrv) + + group.Go(func() error { + grpcLis, err := net.Listen("tcp", endpoint) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("grpc listening on \"%s\"", endpoint)) + + return grpcSrv.Serve(grpcLis) + }) + + group.Go(func() error { + <-ctx.Done() + + grpcSrv.GracefulStop() + + return ctx.Err() + }) + + return nil +} + +func mtlsInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if p, ok := peer.FromContext(ctx); ok { + if mtls, ok := p.AuthInfo.(credentials.TLSInfo); ok { + certificates := mtls.State.PeerCertificates + + if len(certificates) > 0 { + if len(certificates) != 1 { + return nil, fmt.Errorf("tls: invalid certificate chain") // nolint: goerr113 + } + + cquery := QueryClientFromCtx(ctx) + + cert := certificates[0] + + // validation + var owner sdk.Address + if owner, err = sdk.AccAddressFromBech32(cert.Subject.CommonName); err != nil { + return nil, fmt.Errorf("tls: invalid certificate's subject common name: %w", err) + } + + // 1. CommonName in issuer and Subject must match and be as Bech32 format + if cert.Subject.CommonName != cert.Issuer.CommonName { + return nil, fmt.Errorf("tls: invalid certificate's issuer common name: %w", err) + } + + // 2. serial number must be in + if cert.SerialNumber == nil { + return nil, fmt.Errorf("tls: invalid certificate serial number: %w", err) + } + + // 3. look up certificate on chain + var resp *ctypes.QueryCertificatesResponse + resp, err = cquery.Certificates( + ctx, + &ctypes.QueryCertificatesRequest{ + Filter: ctypes.CertificateFilter{ + Owner: owner.String(), + Serial: cert.SerialNumber.String(), + State: "valid", + }, + }, + ) + if err != nil { + return nil, fmt.Errorf("tls: unable to fetch certificate from chain: %w", err) + } + if (len(resp.Certificates) != 1) || !resp.Certificates[0].Certificate.IsState(ctypes.CertificateValid) { + return nil, errors.New("tls: attempt to use non-existing or revoked certificate") // nolint: goerr113 + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(cert) + + opts := x509.VerifyOptions{ + Roots: clientCertPool, + CurrentTime: time.Now(), + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + MaxConstraintComparisions: 0, + } + + if _, err = cert.Verify(opts); err != nil { + return nil, fmt.Errorf("tls: unable to verify certificate: %w", err) + } + + ctx = ContextWithOwner(ctx, owner) + } + } + } + + return handler(ctx, req) + } +} + +func (gm *grpcProviderV1) GetStatus(ctx context.Context, _ *emptypb.Empty) (*providerv1.Status, error) { + return gm.client.StatusV1(ctx) +} + +func (gm *grpcProviderV1) StreamStatus(_ *emptypb.Empty, stream providerv1.ProviderRPC_StreamStatusServer) error { + bus := fromctx.PubSubFromCtx(gm.ctx) + + events := bus.Sub(ptypes.PubSubTopicProviderStatus) + + for { + select { + case <-gm.ctx.Done(): + return gm.ctx.Err() + case <-stream.Context().Done(): + return stream.Context().Err() + case evt := <-events: + val := evt.(providerv1.Status) + if err := stream.Send(&val); err != nil { + return err + } + } + } +} diff --git a/gateway/rest/router.go b/gateway/rest/router.go index a5f8116e..ea367978 100644 --- a/gateway/rest/router.go +++ b/gateway/rest/router.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" 
"net/http" "net/http/httputil" "net/url" @@ -32,6 +31,7 @@ import ( manifestValidation "github.com/akash-network/akash-api/go/manifest/v2beta2" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" + "github.com/akash-network/node/util/wsutil" "github.com/akash-network/provider" @@ -44,8 +44,6 @@ import ( "github.com/akash-network/provider/gateway/utils" pmanifest "github.com/akash-network/provider/manifest" ipoptypes "github.com/akash-network/provider/operator/ipoperator/types" - - v1 "github.com/akash-network/akash-api/go/inventory/v1" ) type CtxAuthKey string @@ -110,12 +108,6 @@ func newRouter(log log.Logger, addr sdk.Address, pclient provider.Client, ipopcl createStatusHandler(log, pclient, addr)). Methods("GET") - // GET /features - // provider features endpoint does not require authentication - router.HandleFunc("/features", - createFeaturesHandler(log, pclient, addr)). - Methods("GET") - vrouter := router.NewRoute().Subrouter() vrouter.Use(requireOwner()) @@ -415,7 +407,6 @@ func leaseShellHandler(log log.Logger, mclient pmanifest.Client, cclient cluster wg.Add(1) go leaseShellWebsocketHandler(localLog, wg, shellWs, stdinPipeOut, terminalSizeUpdate) - } l := &sync.Mutex{} @@ -533,52 +524,6 @@ func createStatusHandler(log log.Logger, sclient provider.StatusClient, provider } } -func createFeaturesHandler(log log.Logger, sclient provider.StatusClient, providerAddr sdk.Address) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - - // URLs slice and use in range allows execution in both dev and prod - urls := []string{ - "http://inventory-operator.akash-services.svc.cluster.local:8081/getClusterState", - "http://localhost:8081/getClusterState", - } - - var resp *http.Response - var err error - for _, url := range urls { - resp, err = http.Get(url) - if err != nil { - fmt.Printf("Failed to get '%s': %v\n", url, err) - continue - } - defer resp.Body.Close() - break - } - - if err != nil { - fmt.Printf("All attempts failed: %v\n", err) - return - } - - defer resp.Body.Close() - - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - fmt.Println(err) - } - - var clusterState v1.Cluster - err = json.Unmarshal(bodyBytes, &clusterState) - if err != nil { - fmt.Println(err) - } - - fmt.Println("clusterState: ", clusterState) - - writeJSON(log, w, clusterState) - - } -} - func validateHandler(log log.Logger, cl provider.ValidateClient) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { data, err := io.ReadAll(req.Body) diff --git a/go.mod b/go.mod index f221e30e..612d0610 100644 --- a/go.mod +++ b/go.mod @@ -3,36 +3,40 @@ module github.com/akash-network/provider go 1.21 require ( - github.com/akash-network/akash-api v0.0.41 - github.com/akash-network/node v0.30.1-rc3 + github.com/akash-network/akash-api v0.0.43 + github.com/akash-network/node v0.30.1-rc4 github.com/avast/retry-go/v4 v4.5.0 + github.com/blang/semver/v4 v4.0.0 github.com/boz/go-lifecycle v0.1.1 github.com/cosmos/cosmos-sdk v0.45.16 - github.com/cskr/pubsub v1.0.2 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/kit v0.12.0 github.com/go-logr/logr v1.2.4 github.com/go-logr/zapr v1.2.4 github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.4.0 github.com/gorilla/context v1.1.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 + github.com/jaypipes/ghw v0.12.0 github.com/moby/term v0.5.0 github.com/pkg/errors v0.9.1 
github.com/prometheus/client_golang v1.16.0 github.com/prometheus/common v0.44.0 github.com/rook/rook v1.11.1 github.com/shopspring/decimal v1.3.1 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.16.0 + github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 github.com/tendermint/tendermint v0.34.27 + github.com/troian/pubsub v0.1.0 github.com/vektra/mockery/v2 v2.40.1 go.uber.org/zap v1.24.0 - golang.org/x/net v0.14.0 - golang.org/x/sync v0.3.0 - google.golang.org/grpc v1.57.0 + golang.org/x/net v0.19.0 + golang.org/x/sync v0.6.0 + google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.1 k8s.io/apimachinery v0.26.1 @@ -47,7 +51,7 @@ replace ( // use cosmos fork of keyring github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 - github.com/cosmos/ledger-cosmos-go => github.com/akash-network/ledger-go/cosmos v0.14.3 + github.com/cosmos/ledger-cosmos-go => github.com/akash-network/ledger-go/cosmos v0.14.4 // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 @@ -73,13 +77,13 @@ require ( github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect github.com/DataDog/zstd v1.5.0 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/Workiva/go-datastructures v1.0.53 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect @@ -116,12 +120,13 @@ require ( github.com/emicklei/go-restful/v3 v3.10.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/getsentry/sentry-go v0.17.0 // indirect + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -147,7 +152,7 @@ require ( github.com/gtank/ristretto255 v0.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.3.1 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.5 // indirect @@ -172,11 +177,12 @@ require ( github.com/imdario/mergo v0.3.13 // indirect github.com/improbable-eng/grpc-web v0.14.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jaypipes/pcidb v1.0.0 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/jmhodges/levigo 
v1.0.1-0.20191019112844-b572e7f4cdac // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.15.11 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 // indirect @@ -206,7 +212,7 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/openshift/api v0.0.0-20210105115604-44119421ec6b // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -216,17 +222,19 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/regen-network/cosmos-proto v0.3.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/rs/zerolog v1.30.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/go-amino v0.16.0 // indirect @@ -238,22 +246,21 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.step.sm/crypto v0.34.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - golang.org/x/crypto v0.12.0 // indirect - golang.org/x/exp v0.0.0-20221019170559-20944726eadf // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.13.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect - google.golang.org/protobuf 
v1.31.0 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect diff --git a/go.sum b/go.sum index 92192655..2cab2797 100644 --- a/go.sum +++ b/go.sum @@ -6,7 +6,6 @@ cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISt cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -20,11 +19,10 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -32,19 +30,19 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= 
-cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= -cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= -cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= -cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= +cloud.google.com/go/monitoring v1.16.3 h1:mf2SN9qSoBtIgiMA4R/y4VADPWZA7VCNJA079qLaZQ8= +cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -54,7 +52,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= cosmossdk.io/api v0.2.6 h1:AoNwaLLapcLsphhMK6+o0kZl+D6MMUaHVqSdwinASGU= @@ -184,6 +181,8 @@ github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqR github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -198,16 +197,16 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/akash-network/akash-api v0.0.41 h1:N3NUF0ZZNU/ypHEUyd8U7wY9+OcR4pTgA2HkeLz7NOI= -github.com/akash-network/akash-api v0.0.41/go.mod h1:aeB/9lti2LegbrOm0fSRNB0iKoE8JmMD8ou9EXJ8QGY= +github.com/akash-network/akash-api v0.0.43 h1:IT/t/tVCVudyKRAx/lqX+eCQW/Yx21l4gBlgaVsHiFU= +github.com/akash-network/akash-api 
v0.0.43/go.mod h1:YZq1ukyGEknizGaE6g+A8yGupGqEN8hG8PGPPE8VAxA= github.com/akash-network/cometbft v0.34.27-akash h1:V1dApDOr8Ee7BJzYyQ7Z9VBtrAul4+baMeA6C49dje0= github.com/akash-network/cometbft v0.34.27-akash/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw= github.com/akash-network/ledger-go v0.14.3 h1:LCEFkTfgGA2xFMN2CtiKvXKE7dh0QSM77PJHCpSkaAo= github.com/akash-network/ledger-go v0.14.3/go.mod h1:NfsjfFvno9Kaq6mfpsKz4sqjnAVVEsVsnBJfKB4ueAs= -github.com/akash-network/ledger-go/cosmos v0.14.3 h1:bEI9jLHM+Lm55idi4RfJlDez4/rVJs7E1MT0U2whYqI= -github.com/akash-network/ledger-go/cosmos v0.14.3/go.mod h1:SjAfheQTE4rWk0ir+wjbOWxwj8nc8E4AZ08NdsvYG24= -github.com/akash-network/node v0.30.1-rc3 h1:GZ0Ox7pVkH2IKD6sTTRrhXk8SXX/9f9coxI8hwXrbFw= -github.com/akash-network/node v0.30.1-rc3/go.mod h1:Wx/R2O/mSyZEKmDsY2vOHcSETyHeAFw/vJ3fzrrnDsk= +github.com/akash-network/ledger-go/cosmos v0.14.4 h1:h3WiXmoKKs9wkj1LHcJ12cLjXXg6nG1fp+UQ5+wu/+o= +github.com/akash-network/ledger-go/cosmos v0.14.4/go.mod h1:SjAfheQTE4rWk0ir+wjbOWxwj8nc8E4AZ08NdsvYG24= +github.com/akash-network/node v0.30.1-rc4 h1:/n084aQS785ewviogBT5LHeR9e0uq1CPJe7FUus68g8= +github.com/akash-network/node v0.30.1-rc4/go.mod h1:JDFAEdUoIah1Za4zX0N22+qx1fRD4csGo9nS46jV8Es= github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -442,7 +441,7 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -450,8 +449,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= -github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/cucumber/common/gherkin/go/v22 v22.0.0 h1:4K8NqptbvdOrjL9DEea6HFjSpbdT9+Q5kgLpmmsHYl0= github.com/cucumber/common/gherkin/go/v22 v22.0.0/go.mod h1:3mJT10B2GGn3MvVPd3FwR7m2u4tLhSRhWUqJU4KN4Fg= github.com/cucumber/common/messages/go/v17 v17.1.1 h1:RNqopvIFyLWnKv0LfATh34SWBhXeoFTJnSrgm9cT/Ts= @@ -578,8 +575,9 @@ github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color 
v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -599,12 +597,12 @@ github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= @@ -620,6 +618,7 @@ github.com/getsentry/sentry-go v0.17.0/go.mod h1:B82dxtBvxG0KaPD8/hfSV+VcHD+Lg/x github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -666,6 +665,7 @@ github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -885,12 +885,11 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -898,16 +897,15 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -984,8 +982,8 @@ github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= -github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -1214,6 +1212,10 @@ github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrO github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jaypipes/ghw v0.12.0 h1:xU2/MDJfWmBhJnujHY9qwXQLs3DBsf0/Xa9vECY0Tho= +github.com/jaypipes/ghw v0.12.0/go.mod h1:jeJGbkRB2lL3/gxYzNYzEDETV1ZJ56OKr+CSeSEym+g= +github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8= +github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk= github.com/jcmturner/aescts v1.0.1/go.mod h1:k9gJoDUf1GH5r2IBtBjwjDCoLELYxOcEhitdP8RL7qQ= github.com/jcmturner/dnsutils v1.0.1/go.mod h1:tqMo38L01jO8AKxT0S9OQVlGZu3dkEt+z5CA+LOhwB0= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -1297,8 +1299,8 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -1594,8 +1596,8 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko 
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1622,7 +1624,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -1722,8 +1723,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rook/rook v1.11.1 h1:23GWgK/S+IrtKKbou5e0VAxHNriDxSWZLhHyV92vNIw= github.com/rook/rook v1.11.1/go.mod h1:MPltNTZtf8DmhJjAxsdgYmJaJmFcq/CHadTm2DMPPiM= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -1739,6 +1740,10 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod 
h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= @@ -1784,27 +1789,28 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1815,8 +1821,8 @@ github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1838,12 +1844,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= @@ -1876,6 +1881,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/troian/hid v0.13.2 h1:O7PWZQm5YGyg0nVvknFVLVrNTPillz4ZXvxJOtoyteE= github.com/troian/hid v0.13.2/go.mod h1:n6adloQ1876oEXZr6fFsthy4FDHxwJhh7QYQspm30Ds= +github.com/troian/pubsub v0.1.0 h1:ePToDcB/zZjDMk5uuUSCV93Xl7i+1SNvc18tcWso1Q8= +github.com/troian/pubsub v0.1.0/go.mod h1:ALzDZB06e+BF8JeLnO1hbVIY9dCTu8x6mhcdvitlNRs= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= @@ -1988,8 +1995,8 @@ go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= 
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -2031,14 +2038,12 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2054,8 +2059,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20221019170559-20944726eadf h1:nFVjjKDgNY37+ZSYCJmtYf7tOlfQswHqplG2eosjOMg= -golang.org/x/exp v0.0.0-20221019170559-20944726eadf/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2154,8 +2159,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2175,8 +2180,8 @@ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2188,8 +2193,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2223,6 +2228,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2268,7 +2274,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2278,7 +2283,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2298,15 +2302,14 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2318,8 +2321,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2330,8 +2333,8 @@ golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2403,14 +2406,13 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2454,8 +2456,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= +google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2516,10 +2518,8 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2527,12 +2527,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -2568,8 +2568,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/make/releasing.mk b/make/releasing.mk index c78cf1c0..9beb073d 100644 --- a/make/releasing.mk +++ b/make/releasing.mk @@ -3,13 +3,20 @@ GORELEASER_DEBUG ?= false GORELEASER_IMAGE := ghcr.io/goreleaser/goreleaser-cross:$(GOTOOLCHAIN_SEMVER) GORELEASER_MOUNT_CONFIG ?= false -ifeq ($(GORELEASER_RELEASE),true) - GORELEASER_SKIP_VALIDATE := false - GORELEASER_SKIP_PUBLISH := release --skip-publish=false -else - GORELEASER_SKIP_PUBLISH := --skip-publish=true - GORELEASER_SKIP_VALIDATE ?= false +GORELEASER_SKIP_FLAGS := $(GORELEASER_SKIP) +GORELEASER_SKIP := + +null := +space := $(null) # +comma := , + +ifneq ($(GORELEASER_RELEASE),true) GITHUB_TOKEN= + GORELEASER_SKIP_FLAGS += publish +endif + +ifneq ($(GORELEASER_SKIP_FLAGS),) + GORELEASER_SKIP := --skip=$(subst 
$(space),$(comma),$(strip $(GORELEASER_SKIP_FLAGS))) endif ifeq ($(GORELEASER_MOUNT_CONFIG),true) @@ -70,8 +77,7 @@ docker-image: -f .goreleaser-docker.yaml \ --debug=$(GORELEASER_DEBUG) \ --clean \ - --skip-validate \ - --skip-publish \ + --skip=publish,validate \ --snapshot .PHONY: gen-changelog @@ -100,8 +106,8 @@ release: gen-changelog -w /go/src/$(GO_MOD_NAME)\ $(GORELEASER_IMAGE) \ -f "$(GORELEASER_CONFIG)" \ - $(GORELEASER_SKIP_PUBLISH) \ - --skip-validate=$(GORELEASER_SKIP_VALIDATE) \ + release \ + $(GORELEASER_SKIP) \ --debug=$(GORELEASER_DEBUG) \ --clean \ --release-notes=/go/src/$(GO_MOD_NAME)/.cache/changelog.md diff --git a/manifest/manager.go b/manifest/manager.go index e56fecfd..3f8b9a6d 100644 --- a/manifest/manager.go +++ b/manifest/manager.go @@ -334,12 +334,12 @@ func (m *manager) fillAllRequests(response error) { func (m *manager) emitReceivedEvents() { if !m.fetched || len(m.manifests) == 0 { - m.log.Debug("emit received events skipped", "data", m.data, "manifests", len(m.manifests)) + m.log.Debug("emit received events skipped", "manifests", len(m.manifests)) return } if len(m.localLeases) == 0 { - m.log.Debug("emit received events skips due to no leases", "data", m.data, "manifests", len(m.manifests)) + m.log.Debug("emit received events skips due to no leases", "manifests", len(m.manifests)) m.fillAllRequests(ErrNoLeaseForDeployment) return } diff --git a/manifest/mocks/client.go b/manifest/mocks/client.go index 961f88eb..d57fdd3d 100644 --- a/manifest/mocks/client.go +++ b/manifest/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. package mocks @@ -29,6 +29,10 @@ func (_m *Client) EXPECT() *Client_Expecter { func (_m *Client) IsActive(_a0 context.Context, _a1 v1beta3.DeploymentID) (bool, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for IsActive") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.DeploymentID) (bool, error)); ok { @@ -82,6 +86,10 @@ func (_c *Client_IsActive_Call) RunAndReturn(run func(context.Context, v1beta3.D func (_m *Client) Submit(_a0 context.Context, _a1 v1beta3.DeploymentID, _a2 v2beta2.Manifest) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for Submit") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, v1beta3.DeploymentID, v2beta2.Manifest) error); ok { r0 = rf(_a0, _a1, _a2) diff --git a/manifest/mocks/status_client.go b/manifest/mocks/status_client.go index 2d4606c6..6c24a3de 100644 --- a/manifest/mocks/status_client.go +++ b/manifest/mocks/status_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
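// Mocks regenerated with mockery v2.40.1 panic when a mocked method is
// called on an expectation that has no configured return value (the new
// `if len(ret) == 0` guards), whereas older generations fell through to
// zero values. A minimal sketch of the wiring now required in a test
// (ctx and the returned values are illustrative):
//
//	m := NewStatusClient(t)
//	m.EXPECT().Status(mock.Anything).Return(&manifest.Status{}, nil)
//	st, err := m.Status(ctx)
//	// an expectation without .Return(...) would now panic:
//	// "no return value specified for Status"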
package mocks @@ -7,6 +7,8 @@ import ( manifest "github.com/akash-network/provider/manifest" mock "github.com/stretchr/testify/mock" + + v1 "github.com/akash-network/akash-api/go/provider/v1" ) // StatusClient is an autogenerated mock type for the StatusClient type @@ -26,6 +28,10 @@ func (_m *StatusClient) EXPECT() *StatusClient_Expecter { func (_m *StatusClient) Status(_a0 context.Context) (*manifest.Status, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *manifest.Status var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*manifest.Status, error)); ok { @@ -76,6 +82,64 @@ func (_c *StatusClient_Status_Call) RunAndReturn(run func(context.Context) (*man return _c } +// StatusV1 provides a mock function with given fields: _a0 +func (_m *StatusClient) StatusV1(_a0 context.Context) (*v1.ManifestStatus, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for StatusV1") + } + + var r0 *v1.ManifestStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*v1.ManifestStatus, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *v1.ManifestStatus); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.ManifestStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StatusClient_StatusV1_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StatusV1' +type StatusClient_StatusV1_Call struct { + *mock.Call +} + +// StatusV1 is a helper method to define mock.On call +// - _a0 context.Context +func (_e *StatusClient_Expecter) StatusV1(_a0 interface{}) *StatusClient_StatusV1_Call { + return &StatusClient_StatusV1_Call{Call: _e.mock.On("StatusV1", _a0)} +} + +func (_c *StatusClient_StatusV1_Call) Run(run func(_a0 context.Context)) *StatusClient_StatusV1_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StatusClient_StatusV1_Call) Return(_a0 *v1.ManifestStatus, _a1 error) *StatusClient_StatusV1_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StatusClient_StatusV1_Call) RunAndReturn(run func(context.Context) (*v1.ManifestStatus, error)) *StatusClient_StatusV1_Call { + _c.Call.Return(run) + return _c +} + // NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
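// A hedged sketch of stubbing the new StatusV1 method through the expecter
// API generated above, using RunAndReturn for dynamic values (the
// Deployments count is illustrative):
//
//	m := NewStatusClient(t)
//	m.EXPECT().StatusV1(mock.Anything).RunAndReturn(
//		func(ctx context.Context) (*v1.ManifestStatus, error) {
//			return &v1.ManifestStatus{Deployments: 1}, nil
//		})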
func NewStatusClient(t interface { diff --git a/manifest/service.go b/manifest/service.go index e9171ab7..0e948d75 100644 --- a/manifest/service.go +++ b/manifest/service.go @@ -5,8 +5,10 @@ import ( "errors" "time" + provider "github.com/akash-network/akash-api/go/provider/v1" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + tpubsub "github.com/troian/pubsub" "github.com/boz/go-lifecycle" @@ -19,6 +21,8 @@ import ( clustertypes "github.com/akash-network/provider/cluster/types/v1beta3" "github.com/akash-network/provider/event" "github.com/akash-network/provider/session" + "github.com/akash-network/provider/tools/fromctx" + ptypes "github.com/akash-network/provider/types" ) // ErrNotRunning is the error when service is not running @@ -43,6 +47,7 @@ var ( //go:generate mockery --name StatusClient type StatusClient interface { Status(context.Context) (*Status, error) + StatusV1(context.Context) (*provider.ManifestStatus, error) } // Client is the interface that wraps HandleManifest method @@ -71,6 +76,7 @@ func NewService(ctx context.Context, session session.Session, bus pubsub.Bus, ho } s := &service{ + ctx: ctx, session: session, bus: bus, sub: sub, @@ -94,6 +100,7 @@ func NewService(ctx context.Context, session session.Session, bus pubsub.Bus, ho } type service struct { + ctx context.Context config ServiceConfig session session.Session bus pubsub.Bus @@ -215,29 +222,46 @@ func (s *service) Status(ctx context.Context) (*Status, error) { } } +func (s *service) StatusV1(ctx context.Context) (*provider.ManifestStatus, error) { + res, err := s.Status(ctx) + if err != nil { + return nil, err + } + + return &provider.ManifestStatus{Deployments: res.Deployments}, nil +} + func (s *service) run() { defer s.lc.ShutdownCompleted() defer s.sub.Close() + bus := fromctx.PubSubFromCtx(s.ctx) + s.updateGauges() + + signalch := make(chan struct{}, 1) + trySignal := func() { + select { + case signalch <- struct{}{}: + case <-s.lc.ShutdownRequest(): + default: + } + } + loop: for { select { - case err := <-s.lc.ShutdownRequest(): s.lc.ShutdownInitiated(err) break loop - case ev := <-s.sub.Events(): switch ev := ev.(type) { - case event.LeaseWon: if ev.LeaseID.GetProvider() != s.session.Provider().Address().String() { continue } s.session.Log().Info("lease won", "lease", ev.LeaseID) s.handleLease(ev, true) - case dtypes.EventDeploymentUpdated: s.session.Log().Info("update received", "deployment", ev.ID, "version", ev.Version) @@ -246,14 +270,12 @@ loop: s.session.Log().Info("deployment updated", "deployment", ev.ID, "version", ev.Version) manager.handleUpdate(ev.Version) } - case dtypes.EventDeploymentClosed: key := dquery.DeploymentPath(ev.ID) if manager := s.managers[key]; manager != nil { s.session.Log().Info("deployment closed", "deployment", ev.ID) manager.stop() } - case mtypes.EventLeaseClosed: if ev.ID.GetProvider() != s.session.Provider().Address().String() { continue @@ -265,11 +287,9 @@ loop: manager.removeLease(ev.ID) } } - case check := <-s.activeCheckCh: _, ok := s.managers[dquery.DeploymentPath(check.Deployment)] check.ch <- ok - case req := <-s.mreqch: // Cancel the watchdog (if it exists), since a manifest has been received s.maybeRemoveWatchdog(req.value.Deployment) @@ -277,13 +297,13 @@ loop: manager := s.ensureManager(req.value.Deployment) // The manager is responsible for putting a result in req.ch manager.handleManifest(req) - + trySignal() case ch := <-s.statusch: - ch <- &Status{ Deployments: uint32(len(s.managers)), } - + 
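+		// signalch is buffered with capacity 1 and trySignal never blocks, so
+		// bursts of manager churn collapse into a single pending signal; the
+		// retained publish below then hands the latest ManifestStatus snapshot
+		// to late subscribers of ptypes.PubSubTopicManifestStatus.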
case <-signalch: + bus.Pub(provider.ManifestStatus{Deployments: uint32(len(s.managers))}, []string{ptypes.PubSubTopicManifestStatus}, tpubsub.WithRetain()) case manager := <-s.managerch: s.session.Log().Info("manager done", "deployment", manager.daddr) @@ -292,6 +312,7 @@ loop: // Cancel the watchdog (if it exists) since the manager has stopped as well s.maybeRemoveWatchdog(manager.daddr) + trySignal() case leaseID := <-s.watchdogch: s.session.Log().Info("watchdog done", "lease", leaseID) delete(s.watchdogs, leaseID) diff --git a/mocks/client.go b/mocks/client.go index 7125b63f..aec64c1d 100644 --- a/mocks/client.go +++ b/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. package mocks @@ -17,6 +17,8 @@ import ( types "github.com/cosmos/cosmos-sdk/types" + v1 "github.com/akash-network/akash-api/go/provider/v1" + v1beta3 "github.com/akash-network/provider/cluster/types/v1beta3" ) @@ -37,6 +39,10 @@ func (_m *Client) EXPECT() *Client_Expecter { func (_m *Client) Cluster() cluster.Client { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Cluster") + } + var r0 cluster.Client if rf, ok := ret.Get(0).(func() cluster.Client); ok { r0 = rf() @@ -80,6 +86,10 @@ func (_c *Client_Cluster_Call) RunAndReturn(run func() cluster.Client) *Client_C func (_m *Client) ClusterService() cluster.Service { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ClusterService") + } + var r0 cluster.Service if rf, ok := ret.Get(0).(func() cluster.Service); ok { r0 = rf() @@ -123,6 +133,10 @@ func (_c *Client_ClusterService_Call) RunAndReturn(run func() cluster.Service) * func (_m *Client) Hostname() v1beta3.HostnameServiceClient { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Hostname") + } + var r0 v1beta3.HostnameServiceClient if rf, ok := ret.Get(0).(func() v1beta3.HostnameServiceClient); ok { r0 = rf() @@ -166,6 +180,10 @@ func (_c *Client_Hostname_Call) RunAndReturn(run func() v1beta3.HostnameServiceC func (_m *Client) Manifest() manifest.Client { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Manifest") + } + var r0 manifest.Client if rf, ok := ret.Get(0).(func() manifest.Client); ok { r0 = rf() @@ -209,6 +227,10 @@ func (_c *Client_Manifest_Call) RunAndReturn(run func() manifest.Client) *Client func (_m *Client) Status(_a0 context.Context) (*provider.Status, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *provider.Status var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*provider.Status, error)); ok { @@ -259,10 +281,72 @@ func (_c *Client_Status_Call) RunAndReturn(run func(context.Context) (*provider. 
return _c } +// StatusV1 provides a mock function with given fields: ctx +func (_m *Client) StatusV1(ctx context.Context) (*v1.Status, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for StatusV1") + } + + var r0 *v1.Status + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*v1.Status, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *v1.Status); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.Status) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Client_StatusV1_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StatusV1' +type Client_StatusV1_Call struct { + *mock.Call +} + +// StatusV1 is a helper method to define mock.On call +// - ctx context.Context +func (_e *Client_Expecter) StatusV1(ctx interface{}) *Client_StatusV1_Call { + return &Client_StatusV1_Call{Call: _e.mock.On("StatusV1", ctx)} +} + +func (_c *Client_StatusV1_Call) Run(run func(ctx context.Context)) *Client_StatusV1_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Client_StatusV1_Call) Return(_a0 *v1.Status, _a1 error) *Client_StatusV1_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Client_StatusV1_Call) RunAndReturn(run func(context.Context) (*v1.Status, error)) *Client_StatusV1_Call { + _c.Call.Return(run) + return _c +} + // Validate provides a mock function with given fields: _a0, _a1, _a2 func (_m *Client) Validate(_a0 context.Context, _a1 types.Address, _a2 deploymentv1beta3.GroupSpec) (provider.ValidateGroupSpecResult, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for Validate") + } + var r0 provider.ValidateGroupSpecResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Address, deploymentv1beta3.GroupSpec) (provider.ValidateGroupSpecResult, error)); ok { diff --git a/mocks/status_client.go b/mocks/status_client.go index 5b470185..db819bdb 100644 --- a/mocks/status_client.go +++ b/mocks/status_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.0. DO NOT EDIT. +// Code generated by mockery v2.40.1. DO NOT EDIT. 
package mocks @@ -7,6 +7,8 @@ import ( provider "github.com/akash-network/provider" mock "github.com/stretchr/testify/mock" + + v1 "github.com/akash-network/akash-api/go/provider/v1" ) // StatusClient is an autogenerated mock type for the StatusClient type @@ -26,6 +28,10 @@ func (_m *StatusClient) EXPECT() *StatusClient_Expecter { func (_m *StatusClient) Status(_a0 context.Context) (*provider.Status, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *provider.Status var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*provider.Status, error)); ok { @@ -76,6 +82,64 @@ func (_c *StatusClient_Status_Call) RunAndReturn(run func(context.Context) (*pro return _c } +// StatusV1 provides a mock function with given fields: ctx +func (_m *StatusClient) StatusV1(ctx context.Context) (*v1.Status, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for StatusV1") + } + + var r0 *v1.Status + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*v1.Status, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *v1.Status); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.Status) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StatusClient_StatusV1_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StatusV1' +type StatusClient_StatusV1_Call struct { + *mock.Call +} + +// StatusV1 is a helper method to define mock.On call +// - ctx context.Context +func (_e *StatusClient_Expecter) StatusV1(ctx interface{}) *StatusClient_StatusV1_Call { + return &StatusClient_StatusV1_Call{Call: _e.mock.On("StatusV1", ctx)} +} + +func (_c *StatusClient_StatusV1_Call) Run(run func(ctx context.Context)) *StatusClient_StatusV1_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *StatusClient_StatusV1_Call) Return(_a0 *v1.Status, _a1 error) *StatusClient_StatusV1_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StatusClient_StatusV1_Call) RunAndReturn(run func(context.Context) (*v1.Status, error)) *StatusClient_StatusV1_Call { + _c.Call.Return(run) + return _c +} + // NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewStatusClient(t interface { diff --git a/operator/cmd.go b/operator/cmd.go index 31a9228d..ea35d61b 100644 --- a/operator/cmd.go +++ b/operator/cmd.go @@ -14,6 +14,7 @@ func Cmd() *cobra.Command { } cmd.AddCommand(inventory.Cmd()) + cmd.AddCommand(cmdPsutil()) return cmd } diff --git a/operator/inventory/annotation.go b/operator/inventory/annotation.go new file mode 100644 index 00000000..1faec3c2 --- /dev/null +++ b/operator/inventory/annotation.go @@ -0,0 +1,196 @@ +package inventory + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + + "github.com/blang/semver/v4" + "gopkg.in/yaml.v3" +) + +const ( + sdlVersionField = "version" + + AnnotationKeyCapabilities = "akash.network/capabilities" +) + +var ( + errCapabilitiesInvalid = errors.New("capabilities: invalid") + errCapabilitiesInvalidContent = fmt.Errorf("%w: content", errCapabilitiesInvalid) + errCapabilitiesInvalidNoVersion = fmt.Errorf("%w: no version found", errCapabilitiesInvalid) + errCapabilitiesInvalidVersion = fmt.Errorf("%w: version", errCapabilitiesInvalid) + errCapabilitiesUnsupportedVersion = fmt.Errorf("%w: unsupported version", errCapabilitiesInvalid) +) + +type CapabilitiesV1 struct { + StorageClasses []string `json:"storage_classes"` +} + +type Capabilities interface{} + +type AnnotationCapabilities struct { + Version semver.Version `json:"version" yaml:"version"` + Capabilities `yaml:",inline"` +} + +var ( + _ json.Marshaler = (*AnnotationCapabilities)(nil) + _ json.Unmarshaler = (*AnnotationCapabilities)(nil) +) + +func remove[T any](slice []T, s int) []T { + return append(slice[:s], slice[s+1:]...) +} + +func NewAnnotationCapabilities(sc []string) *AnnotationCapabilities { + caps := &CapabilitiesV1{ + StorageClasses: make([]string, len(sc)), + } + + copy(caps.StorageClasses, sc) + + res := &AnnotationCapabilities{ + Version: semver.Version{Major: 1}, + Capabilities: caps, + } + + return res +} +func (s *CapabilitiesV1) RemoveClass(name string) bool { + for i, c := range s.StorageClasses { + if c == name { + s.StorageClasses = remove(s.StorageClasses, i) + sort.Strings(s.StorageClasses) + return true + } + } + + return false +} + +func parseNodeCapabilities(annotations map[string]string) (*AnnotationCapabilities, error) { + res := &AnnotationCapabilities{} + + val, exists := annotations[AnnotationKeyCapabilities] + if !exists { + return nil, nil + } + + var err error + if strings.HasPrefix(val, "{") { + err = json.Unmarshal([]byte(val), res) + } else { + err = yaml.Unmarshal([]byte(val), res) + } + + if err != nil { + return nil, err + } + + return res, nil +} + +func (s *AnnotationCapabilities) UnmarshalYAML(node *yaml.Node) error { + var result AnnotationCapabilities + + foundVersion := false + for idx := range node.Content { + if node.Content[idx].Value == sdlVersionField { + var err error + if result.Version, err = semver.ParseTolerant(node.Content[idx+1].Value); err != nil { + return fmt.Errorf("%w: %w", errCapabilitiesInvalidVersion, err) + } + foundVersion = true + break + } + } + + if !foundVersion { + return errCapabilitiesInvalidNoVersion + } + + // nolint: gocritic + switch result.Version.String() { + case "1.0.0": + var decoded CapabilitiesV1 + if err := node.Decode(&decoded); err != nil { + return fmt.Errorf("%w: %w", errCapabilitiesInvalidContent, err) + } + + sort.Strings(decoded.StorageClasses) + + result.Capabilities = &decoded + default: + return fmt.Errorf("%w: %q", errCapabilitiesUnsupportedVersion, result.Version) + } + + *s = result + + return nil +} + +func (s 
*AnnotationCapabilities) UnmarshalJSON(data []byte) error {
+	core := make(map[string]interface{})
+
+	err := json.Unmarshal(data, &core)
+	if err != nil {
+		return fmt.Errorf("%w: %w", errCapabilitiesInvalidContent, err)
+	}
+
+	if _, exists := core[sdlVersionField]; !exists {
+		return errCapabilitiesInvalidNoVersion
+	}
+
+	result := AnnotationCapabilities{}
+
+	if val, valid := core[sdlVersionField].(string); valid {
+		if result.Version, err = semver.ParseTolerant(val); err != nil {
+			return fmt.Errorf("%w: %w", errCapabilitiesInvalidVersion, err)
+		}
+	} else {
+		return errCapabilitiesInvalidNoVersion
+	}
+
+	// nolint: gocritic
+	switch result.Version.String() {
+	case "1.0.0":
+		var decoded CapabilitiesV1
+		if err := json.Unmarshal(data, &decoded); err != nil {
+			return fmt.Errorf("%w: %w", errCapabilitiesInvalidContent, err)
+		}
+
+		sort.Strings(decoded.StorageClasses)
+
+		result.Capabilities = &decoded
+	default:
+		return fmt.Errorf("%w: %q", errCapabilitiesUnsupportedVersion, result.Version)
+	}
+
+	*s = result
+
+	return nil
+}
+
+// MarshalJSON exists because, at the time of writing, Go 1.21 encoding/json does not support the inline tag;
+// this function circumvents the issue by marshaling through a temporary anonymous struct.
+func (s *AnnotationCapabilities) MarshalJSON() ([]byte, error) {
+	var obj interface{}
+
+	// remove the nolint directive when the next version is added
+	// nolint: gocritic
+	switch caps := s.Capabilities.(type) {
+	case *CapabilitiesV1:
+		obj = struct {
+			Version semver.Version `json:"version"`
+			CapabilitiesV1
+		}{
+			Version:        s.Version,
+			CapabilitiesV1: *caps,
+		}
+	}
+
+	return json.Marshal(obj)
+}
diff --git a/operator/inventory/annotation_test.go b/operator/inventory/annotation_test.go
new file mode 100644
index 00000000..00e69064
--- /dev/null
+++ b/operator/inventory/annotation_test.go
@@ -0,0 +1,106 @@
+package inventory
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"gopkg.in/yaml.v3"
+)
+
+type testCase struct {
+	name   string
+	data   string
+	expErr error
+}
+
+func TestAnnotationsJson(t *testing.T) {
+	testCases := []testCase{
+		{
+			name:   "v1/valid",
+			data:   `{"version":"v1.0.0","storage_classes":["beta2","default"]}`,
+			expErr: nil,
+		},
+		{
+			name:   "v1/invalid/missing version",
+			data:   `{"storage_classes":["beta2","default"]}`,
+			expErr: errCapabilitiesInvalidNoVersion,
+		},
+		{
+			name:   "v1/invalid/bad version",
+			data:   `{"version":"bla","storage_classes":["beta2","default"]}`,
+			expErr: errCapabilitiesInvalidVersion,
+		},
+		{
+			name:   "v1/invalid/unsupported version",
+			data:   `{"version":"v10000.0.0","storage_classes":["beta2","default"]}`,
+			expErr: errCapabilitiesUnsupportedVersion,
+		},
+	}
+
+	for _, test := range testCases {
+		t.Run(test.name, func(t *testing.T) {
+			caps := &AnnotationCapabilities{}
+
+			err := json.Unmarshal([]byte(test.data), caps)
+			if test.expErr == nil {
+				assert.NoError(t, err)
+			} else {
+				// ErrorIs checks the sentinel wrapped via %w; ErrorAs with an
+				// *error target would match any non-nil error.
+				assert.ErrorIs(t, err, test.expErr)
+			}
+		})
+	}
+}
+
+func TestAnnotationsYaml(t *testing.T) {
+	testCases := []testCase{
+		{
+			name: "v1/valid",
+			data: `---
+version: v1.0.0
+storage_classes:
+  - beta2
+  - default`,
+			expErr: nil,
+		},
+		{
+			name: "v1/invalid/missing version",
+			data: `---
+storage_classes:
+  - beta2
+  - default`,
+			expErr: errCapabilitiesInvalidNoVersion,
+		},
+		{
+			name: "v1/invalid/bad version",
+			data: `---
+version: bla
+storage_classes:
+  - beta2
+  - default`,
+			expErr: errCapabilitiesInvalidVersion,
+		},
+		{
+			name: "v1/invalid/unsupported version",
+			data: `---
+version: v10000.0.0
+storage_classes:
+  - beta2
+  - default`,
+			expErr: errCapabilitiesUnsupportedVersion,
+		},
+	}
+
+	for _, test := range testCases {
+		t.Run(test.name, func(t *testing.T) {
+			caps := &AnnotationCapabilities{}
+
+			err := yaml.Unmarshal([]byte(test.data), caps)
+			if test.expErr == nil {
+				assert.NoError(t, err)
+			} else {
+				assert.ErrorIs(t, err, test.expErr)
+			}
+		})
+	}
+}
diff --git a/operator/inventory/ceph.go b/operator/inventory/ceph.go
index 70269183..d903b4e4 100644
--- a/operator/inventory/ceph.go
+++ b/operator/inventory/ceph.go
@@ -9,25 +9,23 @@ import (
 	"strings"
 	"time"
 
+	inventory "github.com/akash-network/akash-api/go/inventory/v1"
 	"github.com/go-logr/logr"
-	"github.com/pkg/errors"
 	rookv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	rookclientset "github.com/rook/rook/pkg/client/clientset/versioned"
 	rookifactory "github.com/rook/rook/pkg/client/informers/externalversions"
+	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/watch"
 
-	"github.com/akash-network/node/util/runner"
-	akashv2beta2 "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2"
+	"github.com/akash-network/provider/tools/fromctx"
 )
 
 const (
 	crdDiscoverPeriod = 30 * time.Second
-)
-
-var (
-	errCephInventoryInProgress = errors.New("ceph inventory is being updated")
+	falseVal          = "false"
 )
 
 type stats struct {
@@ -80,6 +78,7 @@ type cephStorageClass struct {
 
 type cephStorageClasses map[string]cephStorageClass
 
+// nolint: unused
 func (sc cephStorageClasses) dup() cephStorageClasses {
 	res := make(cephStorageClasses, len(sc))
 
@@ -90,6 +89,7 @@ func (sc cephStorageClasses) dup() cephStorageClasses {
 	return res
 }
 
+// nolint: unused
 func (cc cephClusters) dup() cephClusters {
 	res := make(cephClusters, len(cc))
 
@@ -100,25 +100,47 @@ func (cc cephClusters) dup() cephClusters {
 	return res
 }
 
+type scrapeResp struct {
+	storage inventory.ClusterStorage
+	err     error
+}
+
+type scrapeReq struct {
+	scs      cephStorageClasses
+	clusters map[string]string
+	respch   chan<- scrapeResp
+}
+
 type ceph struct {
-	exe    RemotePodCommandExecutor
-	ctx    context.Context
-	cancel context.CancelFunc
-	querier
+	exe      RemotePodCommandExecutor
+	ctx      context.Context
+	cancel   context.CancelFunc
+	scrapech chan scrapeReq
 }
 
-func NewCeph(ctx context.Context) (Storage, error) {
+func NewCeph(ctx context.Context) (QuerierStorage, error) {
 	ctx, cancel := context.WithCancel(ctx)
 
 	c := &ceph{
-		exe:     NewRemotePodCommandExecutor(KubeConfigFromCtx(ctx), KubeClientFromCtx(ctx)),
-		ctx:     ctx,
-		cancel:  cancel,
-		querier: newQuerier(),
+		exe:      NewRemotePodCommandExecutor(fromctx.KubeConfigFromCtx(ctx), fromctx.KubeClientFromCtx(ctx)),
+		ctx:      ctx,
+		cancel:   cancel,
+		scrapech: make(chan scrapeReq, 1),
 	}
 
-	group := ErrGroupFromCtx(ctx)
-	group.Go(c.run)
+	startch := make(chan struct{}, 1)
+
+	group := fromctx.ErrGroupFromCtx(ctx)
+	group.Go(func() error {
+		return c.run(startch)
+	})
+	group.Go(c.scraper)
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-startch:
+	}
 
 	return c, nil
 }
@@ -139,32 +161,20 @@ func (c *ceph) crdInstalled(log logr.Logger, rc *rookclientset.Clientset) bool {
 	return false
 }
 
-func (c *ceph) run() error {
-	events := make(chan interface{}, 1000)
+func (c *ceph) run(startch chan<- struct{}) error {
+	bus := fromctx.PubSubFromCtx(c.ctx)
 
-	pubsub := PubSubFromCtx(c.ctx)
+	cephClustersTopic := "cephclusters"
 
-	defer pubsub.Unsub(events)
-	pubsub.AddSub(events, "ns", "sc")
+	// subscribe to namespace, storage-class, persistent-volume and CephCluster
+	// events; any of them can invalidate the current inventory snapshot
+	events := bus.Sub("ns", "sc", "pv", cephClustersTopic)
 
-	log := LogFromCtx(c.ctx).WithName("rook-ceph")
+
defer bus.Unsub(events) + + log := fromctx.LogrFromCtx(c.ctx).WithName("rook-ceph") clusters := make(cephClusters) scs := make(cephStorageClasses) - scrapeData := resp{ - res: nil, - err: errCephInventoryInProgress, - } - - scrapech := runner.Do(func() runner.Result { - return runner.NewResult(c.scrapeMetrics(c.ctx, scs.dup(), clusters.dup())) - }) - - cephClustersTopic := "cephclusters" - - pubsub.AddSub(events, cephClustersTopic) - rc := RookClientFromCtx(c.ctx) factory := rookifactory.NewSharedInformerFactory(rc, 0) @@ -172,6 +182,23 @@ func (c *ceph) run() error { crdDiscoverTick := time.NewTicker(1 * time.Second) + scrapeRespch := make(chan scrapeResp, 1) + scrapech := c.scrapech + + startch <- struct{}{} + + tryScrape := func() { + select { + case scrapech <- scrapeReq{ + scs: scs, + clusters: clusters, + respch: scrapeRespch, + }: + scrapech = nil + default: + } + } + for { select { case <-c.ctx.Done(): @@ -180,7 +207,7 @@ func (c *ceph) run() error { if c.crdInstalled(log, rc) { crdDiscoverTick.Stop() InformKubeObjects(c.ctx, - pubsub, + bus, informer, cephClustersTopic) } else { @@ -214,7 +241,7 @@ func (c *ceph) run() error { case watch.Modified: lblVal := obj.Labels["akash.network"] if lblVal == "" { - lblVal = "false" + lblVal = falseVal } sc := cephStorageClass{} @@ -242,6 +269,7 @@ func (c *ceph) run() error { } log.Info(msg, "name", obj.Name) + tryScrape() case *rookv1.CephCluster: switch evt.Type { case watch.Added: @@ -256,30 +284,78 @@ func (c *ceph) run() error { log.Info(msg, "ns", obj.Namespace, "name", obj.Name) delete(clusters, obj.Name) } + case *corev1.PersistentVolume: + tryScrape() } } - case req := <-c.reqch: - req.respCh <- scrapeData - case res := <-scrapech: - r := resp{} - if err := res.Error(); err != nil { - r.err = errCephInventoryInProgress - log.Error(err, "unable to pull ceph status") + case res := <-scrapeRespch: + if len(res.storage) > 0 { + bus.Pub(storageSignal{ + driver: "ceph", + storage: res.storage, + }, []string{topicStorage}) } - if data, valid := res.Value().([]akashv2beta2.InventoryClusterStorage); valid { - r.res = data + scrapech = c.scrapech + } + } +} + +func (c *ceph) scraper() error { + log := fromctx.LogrFromCtx(c.ctx).WithName("rook-ceph") + + for { + select { + case <-c.ctx.Done(): + return c.ctx.Err() + case req := <-c.scrapech: + var res inventory.ClusterStorage + + dfResults := make(map[string]dfResp, len(req.clusters)) + for clusterID, ns := range req.clusters { + stdout, _, err := c.exe.ExecCommandInContainerWithFullOutputWithTimeout(c.ctx, "rook-ceph-tools", "rook-ceph-tools", ns, "ceph", "df", "--format", "json") + if err != nil { + log.Error(err, "unable to scrape ceph metrics") + } + + rsp := dfResp{} + + _ = json.Unmarshal([]byte(stdout), &rsp) + + dfResults[clusterID] = rsp } - scrapeData = r + for class, params := range req.scs { + df, exists := dfResults[params.clusterID] + if !exists || !params.isAkashManaged { + continue + } - scrapech = runner.Do(func() runner.Result { - return runner.NewResult(c.scrapeMetrics(c.ctx, scs.dup(), clusters.dup())) - }) + for _, pool := range df.Pools { + if pool.Name == params.pool { + res = append(res, inventory.Storage{ + Quantity: inventory.ResourcePair{ + Allocated: resource.NewQuantity(int64(pool.Stats.BytesUsed), resource.DecimalSI), + Allocatable: resource.NewQuantity(int64(pool.Stats.MaxAvail), resource.DecimalSI), + }, + Info: inventory.StorageInfo{ + Class: class, + }, + }) + break + } + } + } + + req.respch <- scrapeResp{ + storage: res, + err: nil, + } } } } +// nolint: 
unused func (c *ceph) scrapeMetrics(ctx context.Context, scs cephStorageClasses, clusters map[string]string) ([]akashv2beta2.InventoryClusterStorage, error) { var res []akashv2beta2.InventoryClusterStorage diff --git a/operator/inventory/cmd.go b/operator/inventory/cmd.go index d91134aa..61ada6e3 100644 --- a/operator/inventory/cmd.go +++ b/operator/inventory/cmd.go @@ -5,64 +5,102 @@ import ( "encoding/json" "errors" "fmt" - "log" "net" "net/http" - "sync" + "strings" "time" - "github.com/akash-network/node/util/runner" - "github.com/cskr/pubsub" + "github.com/fsnotify/fsnotify" "github.com/go-logr/logr" "github.com/go-logr/zapr" "github.com/gorilla/mux" rookclientset "github.com/rook/rook/pkg/client/clientset/versioned" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/troian/pubsub" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/reflection" + "google.golang.org/protobuf/types/known/emptypb" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + inventory "github.com/akash-network/akash-api/go/inventory/v1" + "github.com/akash-network/provider/cluster/kube/clientcommon" providerflags "github.com/akash-network/provider/cmd/provider-services/cmd/flags" cmdutil "github.com/akash-network/provider/cmd/provider-services/cmd/util" - akashv2beta2 "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" akashclientset "github.com/akash-network/provider/pkg/client/clientset/versioned" + "github.com/akash-network/provider/tools/fromctx" ) -func CmdSetContextValue(cmd *cobra.Command, key, val interface{}) { - cmd.SetContext(context.WithValue(cmd.Context(), key, val)) +type router struct { + *mux.Router + queryTimeout time.Duration +} + +type grpcMsgService struct { + inventory.ClusterRPCServer + ctx context.Context } func Cmd() *cobra.Command { cmd := &cobra.Command{ Use: "inventory", Short: "kubernetes operator interfacing inventory", - Args: cobra.ExactArgs(0), SilenceUsage: true, - PreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { zconf := zap.NewDevelopmentConfig() zconf.DisableCaller = true + zconf.DisableStacktrace = true zconf.EncoderConfig.EncodeTime = func(time.Time, zapcore.PrimitiveArrayEncoder) {} zapLog, _ := zconf.Build() - cmd.SetContext(logr.NewContext(cmd.Context(), zapr.NewLogger(zapLog))) + group, ctx := errgroup.WithContext(cmd.Context()) + + cmd.SetContext(logr.NewContext(ctx, zapr.NewLogger(zapLog))) if err := loadKubeConfig(cmd); err != nil { return err } - kubecfg := KubeConfigFromCtx(cmd.Context()) + kubecfg := fromctx.KubeConfigFromCtx(cmd.Context()) - clientset, err := kubernetes.NewForConfig(kubecfg) + kc, err := kubernetes.NewForConfig(kubecfg) if err != nil { return err } + startupch := make(chan struct{}, 1) + pctx, pcancel := context.WithCancel(context.Background()) + + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyStartupCh, (chan<- struct{})(startupch)) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyKubeConfig, kubecfg) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyKubeClientSet, kc) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyErrGroup, group) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyPubSub, pubsub.New(pctx, 1000)) + + go func() { + defer pcancel() + + select { + case <-ctx.Done(): + return + case <-startupch: + } + + _ = group.Wait() + }() + + return nil + }, + PreRunE: func(cmd 
*cobra.Command, args []string) error { + kubecfg := fromctx.KubeConfigFromCtx(cmd.Context()) + rc, err := rookclientset.NewForConfig(kubecfg) if err != nil { return err @@ -73,105 +111,137 @@ func Cmd() *cobra.Command { return err } - group, ctx := errgroup.WithContext(cmd.Context()) - cmd.SetContext(ctx) - - CmdSetContextValue(cmd, CtxKeyKubeClientSet, clientset) - CmdSetContextValue(cmd, CtxKeyRookClientSet, rc) - CmdSetContextValue(cmd, CtxKeyAkashClientSet, ac) - CmdSetContextValue(cmd, CtxKeyPubSub, pubsub.New(1000)) - CmdSetContextValue(cmd, CtxKeyErrGroup, group) + fromctx.CmdSetContextValue(cmd, CtxKeyRookClientSet, rc) + fromctx.CmdSetContextValue(cmd, fromctx.CtxKeyAkashClientSet, ac) return nil }, RunE: func(cmd *cobra.Command, args []string) error { - bus := PubSubFromCtx(cmd.Context()) - group := ErrGroupFromCtx(cmd.Context()) + ctx := cmd.Context() + + bus := fromctx.PubSubFromCtx(ctx) + group := fromctx.ErrGroupFromCtx(ctx) - var storage []Storage - st, err := NewCeph(cmd.Context()) + var storage []QuerierStorage + st, err := NewCeph(ctx) if err != nil { return err } storage = append(storage, st) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + if st, err = NewRancher(ctx); err != nil { + return err + } - // Channel to receive error messages from the goroutine - errChan := make(chan error, 1) + fd := newFeatureDiscovery(ctx) - // Start FeatureDiscovery in a separate goroutine - go func() { - errChan <- FeatureDiscovery(ctx) - }() + storage = append(storage, st) - // ... other code ... - - select { - case err := <-errChan: - // Handle error from FeatureDiscovery - // You might log the error, or take corrective action - log.Printf("FeatureDiscovery encountered an error: %v", err) - case <-cmd.Context().Done(): - // Handle the case where the main command is stopped - // Cancel the context used by FeatureDiscovery - cancel() + clState := &clusterState{ + ctx: ctx, + querierCluster: newQuerierCluster(), } - if st, err = NewRancher(cmd.Context()); err != nil { - return err - } - storage = append(storage, st) + fromctx.CmdSetContextValue(cmd, CtxKeyStorage, storage) + fromctx.CmdSetContextValue(cmd, CtxKeyFeatureDiscovery, fd) + fromctx.CmdSetContextValue(cmd, CtxKeyClusterState, QuerierCluster(clState)) + + ctx = cmd.Context() - CmdSetContextValue(cmd, CtxKeyStorage, storage) + restPort := viper.GetUint16(FlagRESTPort) + grpcPort := viper.GetUint16(FlagGRPCPort) - apiTimeout, _ := cmd.Flags().GetDuration(FlagAPITimeout) - queryTimeout, _ := cmd.Flags().GetDuration(FlagQueryTimeout) - port, _ := cmd.Flags().GetUint16(FlagAPIPort) + apiTimeout := viper.GetDuration(FlagAPITimeout) + queryTimeout := viper.GetDuration(FlagQueryTimeout) + restEndpoint := fmt.Sprintf(":%d", restPort) + grpcEndpoint := fmt.Sprintf(":%d", grpcPort) - // fixme ovrclk/engineering#609 - // nolint: gosec - srv := &http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: newRouter(LogFromCtx(cmd.Context()).WithName("router"), apiTimeout, queryTimeout), + log := fromctx.LogrFromCtx(ctx) + + restSrv := &http.Server{ + Addr: restEndpoint, + Handler: newRouter(apiTimeout, queryTimeout), BaseContext: func(_ net.Listener) context.Context { - return cmd.Context() + return ctx }, + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 60 * time.Second, } + grpcSrv := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 30 * time.Second, + PermitWithoutStream: false, + })) + + inventory.RegisterClusterRPCServer(grpcSrv, &grpcMsgService{ + ctx: ctx, + }) + + 
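+			// Server reflection lets generic gRPC tooling (e.g. grpcurl) discover
+			// and invoke ClusterRPC without compiled protobuf stubs.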
reflection.Register(grpcSrv) + group.Go(func() error { - return srv.ListenAndServe() + return configWatcher(ctx, viper.GetString(FlagConfig)) }) + group.Go(clState.run) + group.Go(fd.Wait) group.Go(func() error { - <-cmd.Context().Done() - return srv.Shutdown(cmd.Context()) + log.Info(fmt.Sprintf("rest listening on \"%s\"", restEndpoint)) + + return restSrv.ListenAndServe() }) - factory := informers.NewSharedInformerFactory(KubeClientFromCtx(cmd.Context()), 0) + group.Go(func() error { + grpcLis, err := net.Listen("tcp", grpcEndpoint) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("grpc listening on \"%s\"", grpcEndpoint)) - InformKubeObjects(cmd.Context(), + return grpcSrv.Serve(grpcLis) + }) + + group.Go(func() error { + <-ctx.Done() + err := restSrv.Shutdown(context.Background()) + + grpcSrv.GracefulStop() + + return err + }) + + kc := fromctx.KubeClientFromCtx(ctx) + factory := informers.NewSharedInformerFactory(kc, 0) + + InformKubeObjects(ctx, bus, factory.Core().V1().Namespaces().Informer(), "ns") - InformKubeObjects(cmd.Context(), + InformKubeObjects(ctx, bus, factory.Storage().V1().StorageClasses().Informer(), "sc") - InformKubeObjects(cmd.Context(), + InformKubeObjects(ctx, bus, factory.Core().V1().PersistentVolumes().Informer(), "pv") - InformKubeObjects(cmd.Context(), + InformKubeObjects(ctx, bus, factory.Core().V1().Nodes().Informer(), "nodes") - return group.Wait() + fromctx.StartupChFromCtx(ctx) <- struct{}{} + err = group.Wait() + + if !errors.Is(err, context.Canceled) { + return err + } + + return nil }, } @@ -190,11 +260,23 @@ func Cmd() *cobra.Command { panic(err) } - cmd.Flags().Uint16(FlagAPIPort, 8080, "port to REST api") - if err = viper.BindPFlag(FlagAPIPort, cmd.Flags().Lookup(FlagAPIPort)); err != nil { + cmd.Flags().Uint16(FlagRESTPort, 8080, "port to REST api") + if err = viper.BindPFlag(FlagRESTPort, cmd.Flags().Lookup(FlagRESTPort)); err != nil { panic(err) } + cmd.Flags().Uint16(FlagGRPCPort, 8081, "port to GRPC api") + if err = viper.BindPFlag(FlagGRPCPort, cmd.Flags().Lookup(FlagGRPCPort)); err != nil { + panic(err) + } + + cmd.Flags().String(FlagConfig, "", "inventory configuration flag") + if err = viper.BindPFlag(FlagConfig, cmd.Flags().Lookup(FlagConfig)); err != nil { + panic(err) + } + + cmd.AddCommand(cmdFeatureDiscoveryNode()) + return cmd } @@ -206,15 +288,19 @@ func loadKubeConfig(c *cobra.Command) error { return err } - CmdSetContextValue(c, CtxKeyKubeConfig, config) + fromctx.CmdSetContextValue(c, fromctx.CtxKeyKubeConfig, config) return nil } -func newRouter(_ logr.Logger, apiTimeout, queryTimeout time.Duration) *mux.Router { - router := mux.NewRouter() +func newRouter(apiTimeout, queryTimeout time.Duration) *router { + mRouter := mux.NewRouter() + rt := &router{ + Router: mRouter, + queryTimeout: queryTimeout, + } - router.Use(func(h http.Handler) http.Handler { + mRouter.Use(func(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rCtx, cancel := context.WithTimeout(r.Context(), apiTimeout) defer cancel() @@ -223,77 +309,131 @@ func newRouter(_ logr.Logger, apiTimeout, queryTimeout time.Duration) *mux.Route }) }) - router.HandleFunc("/inventory", func(w http.ResponseWriter, req *http.Request) { - storage := StorageFromCtx(req.Context()) - inv := akashv2beta2.Inventory{ - TypeMeta: metav1.TypeMeta{ - Kind: "Inventory", - APIVersion: "akash.network/v2beta2", - }, - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: metav1.NewTime(time.Now().UTC()), - }, - Spec: 
akashv2beta2.InventorySpec{}, - Status: akashv2beta2.InventoryStatus{ - State: akashv2beta2.InventoryStatePulled, - }, + inventoryRouter := mRouter.PathPrefix("/v1").Subrouter() + inventoryRouter.HandleFunc("/inventory", rt.inventoryHandler) + + return rt +} + +func (rt *router) inventoryHandler(w http.ResponseWriter, req *http.Request) { + state := ClusterStateFromCtx(req.Context()) + + resp, err := state.Query(req.Context()) + + var data []byte + + defer func() { + if err != nil { + w.WriteHeader(http.StatusInternalServerError) } - var data []byte + if len(data) > 0 { + _, _ = w.Write(data) + } + }() - ctx, cancel := context.WithTimeout(req.Context(), queryTimeout) - defer func() { - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - w.WriteHeader(http.StatusRequestTimeout) - } + if err != nil { + return + } - if len(data) > 0 { - _, _ = w.Write(data) - } - }() + if req.URL.Query().Has("pretty") { + data, err = json.MarshalIndent(&resp, "", " ") + } else { + data, err = json.Marshal(&resp) + } - datach := make(chan runner.Result, 1) - var wg sync.WaitGroup + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + data = []byte(err.Error()) + } else { + w.Header().Set("Content-Type", "application/json") + } +} - wg.Add(len(storage)) +func (gm *grpcMsgService) QueryCluster(ctx context.Context, _ *emptypb.Empty) (*inventory.Cluster, error) { + clq := ClusterStateFromCtx(gm.ctx) - for idx := range storage { - go func(idx int) { - defer wg.Done() + res, err := clq.Query(ctx) + if err != nil { + return nil, err + } - datach <- runner.NewResult(storage[idx].Query(ctx)) - }(idx) + return &res, nil +} + +func (gm *grpcMsgService) StreamCluster(_ *emptypb.Empty, stream inventory.ClusterRPC_StreamClusterServer) error { + bus := fromctx.PubSubFromCtx(gm.ctx) + + subch := bus.Sub(topicClusterState) + + defer func() { + bus.Unsub(subch, topicClusterState) + }() + +loop: + for { + select { + case <-gm.ctx.Done(): + return gm.ctx.Err() + case <-stream.Context().Done(): + return stream.Context().Err() + case msg, ok := <-subch: + if !ok { + continue loop + } + val := msg.(inventory.Cluster) + if err := stream.Send(&val); err != nil { + return err + } } + } +} - go func() { - defer cancel() - wg.Wait() - }() - - done: - for { - select { - case <-ctx.Done(): - break done - case res := <-datach: - if res.Error() != nil { - inv.Status.Messages = append(inv.Status.Messages, res.Error().Error()) - } +func configWatcher(ctx context.Context, file string) error { + config, err := loadConfig(file, false) + if err != nil { + return err + } - if inventory, valid := res.Value().([]akashv2beta2.InventoryClusterStorage); valid { - inv.Spec.Storage = append(inv.Spec.Storage, inventory...) 
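// The ClusterRPC service registered above can be exercised with a plain
// gRPC client. A minimal sketch, assuming the default plaintext gRPC port
// (8081) and the stub constructor generated alongside
// RegisterClusterRPCServer:
//
//	conn, err := grpc.Dial("localhost:8081",
//		grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//
//	client := inventory.NewClusterRPCClient(conn)
//	cluster, err := client.QueryCluster(ctx, &emptypb.Empty{})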
- } - } + var watcher *fsnotify.Watcher + var evtch chan fsnotify.Event + + if strings.HasSuffix(file, "yaml") { + watcher, err = fsnotify.NewWatcher() + if err != nil { + return err } + } - var err error - if data, err = json.Marshal(&inv); err != nil { - w.WriteHeader(http.StatusInternalServerError) - data = []byte(err.Error()) - } else { - w.Header().Set("Content-Type", "application/json") + defer func() { + if watcher != nil { + _ = watcher.Close() } - }) + }() - return router + if watcher != nil { + if err = watcher.Add(file); err != nil { + return err + } + + evtch = watcher.Events + } + + bus := fromctx.PubSubFromCtx(ctx) + + bus.Pub(config, []string{"config"}, pubsub.WithRetain()) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case evt := <-evtch: + if evt.Has(fsnotify.Create) || evt.Has(fsnotify.Write) { + config, _ = loadConfig(evt.Name, true) + } else if evt.Has(fsnotify.Remove) { + config, _ = loadConfig("", true) + } + bus.Pub(config, []string{"config"}, pubsub.WithRetain()) + } + } } diff --git a/operator/inventory/config.go b/operator/inventory/config.go new file mode 100644 index 00000000..51e69f13 --- /dev/null +++ b/operator/inventory/config.go @@ -0,0 +1,332 @@ +package inventory + +import ( + "fmt" + "os" + "regexp" + "slices" + "sort" + "strings" + + "github.com/blang/semver/v4" + "github.com/spf13/viper" + "gopkg.in/yaml.v3" +) + +type ExcludeRules []*regexp.Regexp + +type ConfigStorage struct { + Exclude ExcludeRules `json:"exclude" yaml:"exclude"` +} + +type ConfigNodes struct { + Exclude ExcludeRules `json:"exclude" yaml:"exclude"` +} + +type ExcludeNodeStorage struct { + NodeFilter *regexp.Regexp `json:"node_filter" yaml:"node_filter"` + Classes []string `json:"classes" yaml:"classes"` +} + +type ExcludeNodesStorage []ExcludeNodeStorage + +type Exclude struct { + Nodes ExcludeRules `json:"nodes" yaml:"nodes"` + NodeStorage ExcludeNodesStorage `json:"node_storage" yaml:"node_storage"` +} + +type Config struct { + Version semver.Version `json:"version" yaml:"version"` + ClusterStorage []string `json:"cluster_storage" yaml:"cluster_storage"` + Exclude Exclude `json:"exclude" yaml:"exclude"` + + dirty bool +} + +var defaultConfig = []byte(`--- +version: v1 +cluster_storage: [] +exclude: + nodes: [] + node_storage: [] +`) + +func loadConfig(cfg string, defaultOnErr bool) (res Config, err error) { + var data []byte + + defer func() { + if err != nil && defaultOnErr { + _ = yaml.Unmarshal(defaultConfig, &res) + } + }() + + // nolint: gocritic + if cfg == "" { + data = defaultConfig + } else if strings.HasSuffix(cfg, "yaml") { + data, err = os.ReadFile(viper.GetString(FlagConfig)) + } else { + data = []byte(cfg) + } + + if err != nil { + return res, err + } + + if err := yaml.Unmarshal(data, &res); err != nil { + return res, err + } + + return res, nil +} + +func (cfg *Config) FilterOutStorageClasses(available []string) { + if slices.Equal(cfg.ClusterStorage, available) { + return + } + + presentSc := make(map[string]bool) + requestedSc := make(map[string]bool) + + for _, sc := range available { + presentSc[sc] = true + } + + for _, sc := range cfg.ClusterStorage { + requestedSc[sc] = true + } + + for sc := range requestedSc { + if _, exists := presentSc[sc]; !exists { + delete(requestedSc, sc) + } + } + + cfg.ClusterStorage = make([]string, 0, len(requestedSc)) + + for sc := range requestedSc { + cfg.ClusterStorage = append(cfg.ClusterStorage, sc) + } + + sort.Strings(cfg.ClusterStorage) + + for i := range cfg.Exclude.NodeStorage { + for j, class := range 
cfg.Exclude.NodeStorage[i].Classes {
+			if _, exists := requestedSc[class]; !exists {
+				cfg.Exclude.NodeStorage[i].Classes = remove(cfg.Exclude.NodeStorage[i].Classes, j)
+			}
+		}
+	}
+
+	cfg.dirty = true
+}
+
+func (cfg *Config) Copy() Config {
+	res := Config{
+		ClusterStorage: make([]string, len(cfg.ClusterStorage)),
+		Exclude:        cfg.Exclude.Copy(),
+		dirty:          false,
+	}
+
+	copy(res.ClusterStorage, cfg.ClusterStorage)
+
+	return res
+}
+
+func (nd *Exclude) Copy() Exclude {
+	res := Exclude{
+		Nodes:       nd.Nodes.Copy(),
+		NodeStorage: nd.NodeStorage.Copy(),
+	}
+
+	return res
+}
+
+func (nd *ExcludeRules) Copy() ExcludeRules {
+	res := make(ExcludeRules, len(*nd))
+
+	copy(res, *nd)
+
+	return res
+}
+
+func (nd *ExcludeNodesStorage) Copy() ExcludeNodesStorage {
+	res := make(ExcludeNodesStorage, 0, len(*nd))
+
+	for _, rule := range *nd {
+		res = append(res, rule.Copy())
+	}
+
+	return res
+}
+
+func (nd *ExcludeNodeStorage) Copy() ExcludeNodeStorage {
+	res := ExcludeNodeStorage{
+		NodeFilter: nd.NodeFilter,
+		Classes:    make([]string, len(nd.Classes)),
+	}
+
+	copy(res.Classes, nd.Classes)
+
+	return res
+}
+
+func (cfg *Config) UnmarshalYAML(node *yaml.Node) error {
+	res := Config{}
+
+	var err error
+
+loop:
+	for i := 0; i < len(node.Content); i += 2 {
+		var val interface{}
+		switch node.Content[i].Value {
+		case "version":
+			if res.Version, err = semver.ParseTolerant(node.Content[i+1].Value); err != nil {
+				return fmt.Errorf("%w: %w", errCapabilitiesInvalidVersion, err)
+			}
+			continue loop
+		case "cluster_storage":
+			val = &res.ClusterStorage
+		case "exclude":
+			val = &res.Exclude
+		default:
+			return fmt.Errorf("config: unexpected field %s", node.Content[i].Value) // nolint: goerr113
+		}
+
+		if err := node.Content[i+1].Decode(val); err != nil {
+			return err
+		}
+	}
+
+	availSc := make(map[string]bool)
+	for _, sc := range res.ClusterStorage {
+		availSc[sc] = true
+	}
+
+	for _, exl := range res.Exclude.NodeStorage {
+		for _, sc := range exl.Classes {
+			if _, exists := availSc[sc]; !exists {
+				return fmt.Errorf("storage class \"%s\" in exclude references a non-existent class", sc) // nolint: goerr113
+			}
+		}
+
+		sort.Strings(exl.Classes)
+	}
+
+	sort.Strings(res.ClusterStorage)
+
+	*cfg = res
+
+	return nil
+}
+
+func (nd *ExcludeRules) UnmarshalYAML(node *yaml.Node) error {
+	var excludes []string
+
+	if err := node.Decode(&excludes); err != nil {
+		return err
+	}
+
+	res := make(ExcludeRules, 0, len(excludes))
+	for i, ex := range excludes {
+		if ex == "" {
+			return fmt.Errorf("empty regexp filters are not allowed") // nolint: goerr113
+		}
+
+		r, err := regexp.Compile(ex)
+		if err != nil {
+			return fmt.Errorf("%w: unable to compile exclude \"%s\" at index \"%d\" into regexp", err, ex, i)
+		}
+
+		res = append(res, r)
+	}
+
+	*nd = res
+
+	return nil
+}
+
+func (nd *ExcludeNodeStorage) UnmarshalYAML(node *yaml.Node) error {
+	tmp := struct {
+		NodeFilter string   `yaml:"node_filter"`
+		Classes    []string `yaml:"classes"`
+	}{}
+
+	if err := node.Decode(&tmp); err != nil {
+		return err
+	}
+
+	if tmp.NodeFilter == "" {
+		return fmt.Errorf("empty regexp filters are not allowed") // nolint: goerr113
+	}
+
+	r, err := regexp.Compile(tmp.NodeFilter)
+	if err != nil {
+		return fmt.Errorf("%w: unable to compile exclude node filter \"%s\" into regexp", err, tmp.NodeFilter)
+	}
+
+	res := ExcludeNodeStorage{
+		NodeFilter: r,
+		Classes:    tmp.Classes,
+	}
+
+	*nd = res
+
+	return nil
+}
+
+func (nd *Exclude) IsNodeExcluded(name string) bool {
+	for _, r := range nd.Nodes {
+		if r.MatchString(name) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (nd *Exclude) IsStorageNodeExcluded(name string, class string) bool {
+	for _, r := range nd.NodeStorage {
+		for _, c := range r.Classes {
+			if c == class && r.NodeFilter.MatchString(name) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func (cfg *Config) HasStorageClass(name string) bool {
+	for _, class := range cfg.ClusterStorage {
+		if class == name {
+			return true
+		}
+	}
+
+	return false
+}
+
+// StorageClassesForNode returns the cluster-wide storage classes minus those
+// excluded for the given node by node_storage rules.
+func (cfg *Config) StorageClassesForNode(name string) []string {
+	sc := make([]string, len(cfg.ClusterStorage))
+	copy(sc, cfg.ClusterStorage)
+
+	for _, rule := range cfg.Exclude.NodeStorage {
+		if !rule.NodeFilter.MatchString(name) {
+			continue
+		}
+
+		for _, eClass := range rule.Classes {
+			for i, class := range sc {
+				if eClass == class {
+					sc = remove(sc, i)
+				}
+			}
+		}
+	}
+
+	res := make([]string, len(sc))
+	copy(res, sc)
+
+	return res
+}
diff --git a/operator/inventory/config_test.go b/operator/inventory/config_test.go
new file mode 100644
index 00000000..c751dd79
--- /dev/null
+++ b/operator/inventory/config_test.go
@@ -0,0 +1,238 @@
+package inventory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v3"
+)
+
+func TestConfigEmpty(t *testing.T) {
+	var data = []byte(`---
+version: v1
+cluster_storage: []
+exclude:
+  nodes: []
+  node_storage: []
+`)
+
+	cfg := &Config{}
+
+	err := yaml.Unmarshal(data, cfg)
+	require.NoError(t, err)
+	require.Len(t, cfg.ClusterStorage, 0)
+	require.Len(t, cfg.Exclude.Nodes, 0)
+	require.Len(t, cfg.Exclude.NodeStorage, 0)
+
+	cp := cfg.Copy()
+	require.Len(t, cp.ClusterStorage, 0)
+	require.Len(t, cp.Exclude.Nodes, 0)
+	require.Len(t, cp.Exclude.NodeStorage, 0)
+}
+
+func TestConfigClusterStorage(t *testing.T) {
+	var data = []byte(`---
+version: v1
+cluster_storage:
+  - default
+  - beta2
+exclude:
+  nodes: []
+  node_storage: []
+`)
+
+	storage := []string{
+		"beta2",
+		"default",
+	}
+
+	cfg := &Config{}
+
+	err := yaml.Unmarshal(data, cfg)
+	require.NoError(t, err)
+	require.Len(t, cfg.ClusterStorage, 2)
+	require.Equal(t, storage, cfg.ClusterStorage)
+	require.Len(t, cfg.Exclude.Nodes, 0)
+	require.Len(t, cfg.Exclude.NodeStorage, 0)
+
+	cp := cfg.Copy()
+	require.Len(t, cp.ClusterStorage, 2)
+	require.Equal(t, storage, cp.ClusterStorage)
+	require.Len(t, cp.Exclude.Nodes, 0)
+	require.Len(t, cp.Exclude.NodeStorage, 0)
+
+	// there are no exclude rules, so any node must have all the same storage classes allowed
+	allowedSc := cp.StorageClassesForNode("test")
+	require.Len(t, allowedSc, 2)
+	require.Equal(t, storage, allowedSc)
+}
+
+func TestConfigClusterStorageExclude(t *testing.T) {
+	var data = []byte(`---
+version: v1
+cluster_storage:
+  - default
+  - beta2
+exclude:
+  nodes: []
+  node_storage:
+    - node_filter: ^test
+      classes:
+        - default
+`)
+
+	storage := []string{
+		"beta2",
+		"default",
+	}
+
+	cfg := &Config{}
+
+	err := yaml.Unmarshal(data, cfg)
+	require.NoError(t, err)
+	require.Len(t, cfg.ClusterStorage, 2)
+	require.Equal(t, storage, cfg.ClusterStorage)
+	require.Len(t, cfg.Exclude.Nodes, 0)
+	require.Len(t, cfg.Exclude.NodeStorage, 1)
+
+	cp := cfg.Copy()
+	require.Len(t, cp.ClusterStorage, 2)
+	require.Equal(t, storage, cp.ClusterStorage)
+	require.Len(t, cp.Exclude.Nodes, 0)
+	require.Len(t, cp.Exclude.NodeStorage, 1)
+
+	allowedSc := cp.StorageClassesForNode("test")
+	require.Len(t, allowedSc, 1)
+	require.Equal(t, "beta2", allowedSc[0])
+}
+
+func TestConfigClusterStorageExclude2(t *testing.T) {
+	var data = []byte(`---
+version: v1
+cluster_storage:
+
- default + - beta2 +exclude: + nodes: [] + node_storage: + - node_filter: ^test + classes: + - default + - beta2 +`) + + storage := []string{ + "beta2", + "default", + } + + cfg := &Config{} + + err := yaml.Unmarshal(data, cfg) + require.NoError(t, err) + require.Len(t, cfg.ClusterStorage, 2) + require.Equal(t, storage, cfg.ClusterStorage) + require.Len(t, cfg.Exclude.Nodes, 0) + require.Len(t, cfg.Exclude.NodeStorage, 1) + + cp := cfg.Copy() + require.Len(t, cp.ClusterStorage, 2) + require.Equal(t, storage, cp.ClusterStorage) + require.Len(t, cp.Exclude.Nodes, 0) + require.Len(t, cp.Exclude.NodeStorage, 1) + + allowedSc := cp.StorageClassesForNode("test") + require.Len(t, allowedSc, 0) +} + +func TestConfigClusterStorageNotListed(t *testing.T) { + var data = []byte(`--- +version: v1 +cluster_storage: + - default +exclude: + nodes: [] + node_storage: + - node_filter: ^test + classes: + - default + - beta2 +`) + + cfg := &Config{} + + err := yaml.Unmarshal(data, cfg) + require.Error(t, err) +} + +func TestConfigExcludeNode(t *testing.T) { + var data = []byte(`--- +version: v1 +cluster_storage: + - default + - beta2 +exclude: + nodes: + - ^test +`) + + cfg := &Config{} + + err := yaml.Unmarshal(data, cfg) + require.NoError(t, err) + require.Len(t, cfg.ClusterStorage, 2) + require.Len(t, cfg.Exclude.Nodes, 1) + require.Len(t, cfg.Exclude.NodeStorage, 0) + + cp := cfg.Copy() + require.Len(t, cp.ClusterStorage, 2) + require.Len(t, cp.Exclude.Nodes, 1) + require.Len(t, cp.Exclude.NodeStorage, 0) + + require.True(t, cp.Exclude.IsNodeExcluded("test")) +} + +func TestConfigClusterStorageAvailable(t *testing.T) { + var data = []byte(`--- +version: v1 +cluster_storage: + - default + - beta2 +exclude: + nodes: [] + node_storage: + - node_filter: ^test + classes: + - default + - beta2 +`) + + storage := []string{ + "beta2", + "default", + } + + storageC := []string{ + "beta2", + } + + cfg := &Config{} + + err := yaml.Unmarshal(data, cfg) + require.NoError(t, err) + require.Len(t, cfg.ClusterStorage, 2) + require.Equal(t, storage, cfg.ClusterStorage) + require.Len(t, cfg.Exclude.Nodes, 0) + require.Len(t, cfg.Exclude.NodeStorage, 1) + + cp := cfg.Copy() + cp.FilterOutStorageClasses([]string{"beta2"}) + + require.Len(t, cp.ClusterStorage, 1) + require.Equal(t, storageC, cp.ClusterStorage) + require.Len(t, cp.Exclude.Nodes, 0) + require.Len(t, cp.Exclude.NodeStorage, 1) + + allowedSc := cp.StorageClassesForNode("test") + require.Len(t, allowedSc, 0) +} diff --git a/operator/inventory/feature-discovery-client.go b/operator/inventory/feature-discovery-client.go new file mode 100644 index 00000000..43ad0856 --- /dev/null +++ b/operator/inventory/feature-discovery-client.go @@ -0,0 +1,526 @@ +package inventory + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "slices" + "sort" + "strings" + "time" + + inventoryV1 "github.com/akash-network/akash-api/go/inventory/v1" + "github.com/go-logr/logr" + "github.com/troian/pubsub" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/types/known/emptypb" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + + 
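+	// in-repo helpers: builder exposes the akash.network node labels used below,
+	// while fromctx pulls the shared kube clients, pubsub bus and logger from the context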
"github.com/akash-network/provider/cluster/kube/builder" + "github.com/akash-network/provider/tools/fromctx" +) + +type nodeStateEnum int + +const ( + daemonSetLabelSelector = "app=hostfeaturediscovery" + daemonSetNamespace = "akash-services" + reconnectTimeout = 5 * time.Second +) + +const ( + nodeStateUpdated nodeStateEnum = iota + nodeStateRemoved +) + +type k8sPatch struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +type podStream struct { + ctx context.Context + cancel context.CancelFunc + group *errgroup.Group + log logr.Logger + nodeName string + address string + port uint16 +} + +type nodeState struct { + state nodeStateEnum + name string + node inventoryV1.Node +} + +type featureDiscovery struct { + querierNodes + ctx context.Context + group *errgroup.Group + log logr.Logger + kc *kubernetes.Clientset +} + +func newFeatureDiscovery(ctx context.Context) *featureDiscovery { + log := fromctx.LogrFromCtx(ctx).WithName("feature-discovery") + + group, ctx := errgroup.WithContext(ctx) + + fd := &featureDiscovery{ + log: log, + ctx: logr.NewContext(ctx, log), + group: group, + kc: fromctx.KubeClientFromCtx(ctx), + querierNodes: newQuerierNodes(), + } + + group.Go(fd.connectorRun) + group.Go(fd.run) + group.Go(fd.nodeLabeler) + + return fd +} + +func (fd *featureDiscovery) Wait() error { + return fd.group.Wait() +} + +func (fd *featureDiscovery) connectorRun() error { + watcher, err := fd.kc.CoreV1().Pods(daemonSetNamespace).Watch(fd.ctx, metav1.ListOptions{ + LabelSelector: builder.AkashManagedLabelName + "=true" + + ",app.kubernetes.io/name=operator-inventory-node" + + ",app.kubernetes.io/component=inventory" + + ",app.kubernetes.io/part-of=operator", + }) + + if err != nil { + return fmt.Errorf("error setting up Kubernetes watcher: %w", err) + } + + nodes := make(map[string]*podStream) + + for { + select { + case <-fd.ctx.Done(): + for _, nd := range nodes { + nd.cancel() + delete(nodes, nd.nodeName) + } + + return fd.ctx.Err() + case event := <-watcher.ResultChan(): + if obj, valid := event.Object.(*corev1.Pod); valid { + nodeName := obj.Spec.NodeName + + switch event.Type { + case watch.Added: + fallthrough + case watch.Modified: + if obj.Status.Phase == corev1.PodRunning && obj.Status.PodIP != "" { + if _, exists := nodes[nodeName]; exists { + continue + } + + var containerPort uint16 + + for _, container := range obj.Spec.Containers { + if container.Name == fdContainerName { + for _, port := range container.Ports { + if port.Name == fdContainerGRPCPortName { + containerPort = uint16(port.ContainerPort) + break + } + } + break + } + } + + nodes[nodeName], err = newNodeWatcher(fd.ctx, nodeName, obj.Name, obj.Status.PodIP, containerPort) + if err != nil { + return err + } + } + case watch.Deleted: + nd, exists := nodes[nodeName] + if !exists { + continue + } + + nd.cancel() + delete(nodes, nodeName) + } + } + } + } +} + +func (fd *featureDiscovery) run() error { + nodes := make(map[string]inventoryV1.Node) + + snapshot := func() inventoryV1.Nodes { + res := make(inventoryV1.Nodes, 0, len(nodes)) + + for _, nd := range nodes { + res = append(res, nd.Dup()) + } + + return res + } + + bus := fromctx.PubSubFromCtx(fd.ctx) + + events := bus.Sub(topicNodeState) + defer bus.Unsub(events) + for { + select { + case <-fd.ctx.Done(): + return fd.ctx.Err() + case revt := <-events: + switch evt := revt.(type) { + case nodeState: + switch evt.state { + case nodeStateUpdated: + nodes[evt.name] = evt.node + case nodeStateRemoved: + delete(nodes, 
evt.name) + } + + bus.Pub(snapshot(), []string{topicNodes}, pubsub.WithRetain()) + default: + } + case req := <-fd.reqch: + resp := respNodes{ + res: snapshot(), + } + + req.respCh <- resp + } + } +} + +func (fd *featureDiscovery) nodeLabeler() error { + bus := fromctx.PubSubFromCtx(fd.ctx) + log := fromctx.LogrFromCtx(fd.ctx) + + var unsubChs []<-chan interface{} + var eventsConfigBackup <-chan interface{} + var eventsBackup <-chan interface{} + var events <-chan interface{} + + eventsConfig := bus.Sub("config") + unsubChs = append(unsubChs, eventsConfig) + + configReloadCh := make(chan struct{}, 1) + + defer func() { + for _, ch := range unsubChs { + bus.Unsub(ch) + } + }() + + var cfg Config + + signalConfigReload := func() { + select { + case configReloadCh <- struct{}{}: + eventsConfigBackup = eventsConfig + eventsBackup = events + + events = nil + eventsConfig = nil + default: + } + } + + for { + select { + case <-fd.ctx.Done(): + return fd.ctx.Err() + case <-configReloadCh: + log.Info("received signal to rebuild config. invalidating all inventory and restarting query process") + fd.reloadConfig(cfg) + + events = eventsBackup + eventsConfig = eventsConfigBackup + + if events == nil { + events = bus.Sub("nodes", "sc") + unsubChs = append(unsubChs, events) + } + + case rawEvt := <-events: + if evt, valid := rawEvt.(watch.Event); valid { + signal := false + + switch evt.Object.(type) { + case *corev1.Node: + signal = (evt.Type == watch.Added) || (evt.Type == watch.Modified) + case *storagev1.StorageClass: + signal = evt.Type == watch.Deleted + } + + if signal { + signalConfigReload() + } + } + case evt := <-eventsConfig: + log.Info("received config update") + + cfg = evt.(Config) + signalConfigReload() + } + } +} + +func isNodeAkashLabeled(labels map[string]string) bool { + _, exists := labels[builder.AkashManagedLabelName] + + return exists +} + +func (fd *featureDiscovery) reloadConfig(cfg Config) { + log := fromctx.LogrFromCtx(fd.ctx) + + adjConfig := cfg.Copy() + + nodesList, _ := fd.kc.CoreV1().Nodes().List(fd.ctx, metav1.ListOptions{}) + + scList, _ := fd.kc.StorageV1().StorageClasses().List(fd.ctx, metav1.ListOptions{}) + + presentSc := make([]string, 0, len(scList.Items)) + for _, sc := range scList.Items { + presentSc = append(presentSc, sc.Name) + } + + sort.Strings(presentSc) + + adjConfig.FilterOutStorageClasses(presentSc) + patches := make(map[string][]k8sPatch) + + for _, node := range nodesList.Items { + var p []k8sPatch + + isExcluded := adjConfig.Exclude.IsNodeExcluded(node.Name) + + // node is currently labeled for akash inventory but is excluded from config + if isNodeAkashLabeled(node.Labels) && isExcluded { + delete(node.Labels, builder.AkashManagedLabelName) + delete(node.Annotations, AnnotationKeyCapabilities) + + p = append(p, k8sPatch{ + Op: "add", + Path: "/metadata/labels", + Value: node.Labels, + }) + p = append(p, k8sPatch{ + Op: "add", + Path: "/metadata/annotations", + Value: node.Annotations, + }) + log.Info(fmt.Sprintf("node \"%s\" has matching exclude rule. 
removing from inventory", node.Name))
+		} else if !isNodeAkashLabeled(node.Labels) && !isExcluded {
+			node.Labels[builder.AkashManagedLabelName] = "true"
+			p = append(p, k8sPatch{
+				Op:    "add",
+				Path:  "/metadata/labels",
+				Value: node.Labels,
+			})
+			log.Info(fmt.Sprintf("node \"%s\" is being added to inventory", node.Name))
+		}
+
+		if !isExcluded {
+			var op string
+			caps, _ := parseNodeCapabilities(node.Annotations)
+			if caps == nil {
+				op = "add"
+				caps = NewAnnotationCapabilities(adjConfig.ClusterStorage)
+			} else {
+				sc := adjConfig.StorageClassesForNode(node.Name)
+				switch obj := caps.Capabilities.(type) {
+				case *CapabilitiesV1:
+					if !slices.Equal(sc, obj.StorageClasses) {
+						op = "add"
+						obj.StorageClasses = sc
+					}
+				default:
+				}
+			}
+
+			if op != "" {
+				data, _ := json.Marshal(caps)
+				node.Annotations[AnnotationKeyCapabilities] = string(data)
+
+				p = append(p, k8sPatch{
+					Op:    "add",
+					Path:  "/metadata/annotations",
+					Value: node.Annotations,
+				})
+			}
+		}
+
+		if len(p) > 0 {
+			patches[node.Name] = p
+		}
+	}
+
+	// apply the accumulated JSON patches node by node
+	for node, p := range patches {
+		data, _ := json.Marshal(p)
+
+		_, err := fd.kc.CoreV1().Nodes().Patch(fd.ctx, node, k8stypes.JSONPatchType, data, metav1.PatchOptions{})
+		if err != nil {
+			log.Error(err, fmt.Sprintf("couldn't apply patches for node \"%s\"", node))
+		} else {
+			log.Info(fmt.Sprintf("successfully applied labels and/or annotations patches for node \"%s\"", node))
+		}
+	}
+}
+
+func newNodeWatcher(ctx context.Context, nodeName string, podName string, address string, port uint16) (*podStream, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	group, ctx := errgroup.WithContext(ctx)
+
+	ps := &podStream{
+		ctx:      ctx,
+		cancel:   cancel,
+		group:    group,
+		log:      fromctx.LogrFromCtx(ctx).WithName("node-watcher"),
+		nodeName: nodeName,
+		address:  address,
+		port:     port,
+	}
+
+	kubecfg := fromctx.KubeConfigFromCtx(ctx)
+
+	// when not running in-cluster, reach the pod through the API server's port-forward
+	if kubecfg.BearerTokenFile != "/var/run/secrets/kubernetes.io/serviceaccount/token" {
+		roundTripper, upgrader, err := spdy.RoundTripperFor(kubecfg)
+		if err != nil {
+			return nil, err
+		}
+
+		path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", daemonSetNamespace, podName)
+		hostIP := strings.TrimPrefix(kubecfg.Host, "https://")
+		serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
+
+		dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL)
+
+		errch := make(chan error, 1)
+		pf, err := portforward.New(dialer, []string{fmt.Sprintf(":%d", port)}, ctx.Done(), make(chan struct{}), os.Stdout, os.Stderr)
+		if err != nil {
+			return nil, err
+		}
+
+		group.Go(func() error {
+			err := pf.ForwardPorts()
+			errch <- err
+			return err
+		})
+
+		select {
+		case <-pf.Ready:
+		case err := <-errch:
+			return nil, err
+		}
+
+		ports, err := pf.GetPorts()
+		if err != nil {
+			return nil, err
+		}
+
+		ps.address = "localhost"
+		ps.port = ports[0].Local
+	}
+
+	go ps.run()
+
+	return ps, nil
+}
+
+func (nd *podStream) run() {
+	// Establish the gRPC connection
+	conn, err := grpc.Dial(fmt.Sprintf("%s:%d", nd.address, nd.port), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
+	if err != nil {
+		nd.log.Error(err, "couldn't dial endpoint")
+		return
+	}
+
+	defer func() {
+		_ = conn.Close()
+	}()
+
+	nd.log.Info(fmt.Sprintf("connected to node's \"%s\" inventory streamer at %s:%d", nd.nodeName, nd.address, nd.port))
+
+	client := inventoryV1.NewNodeRPCClient(conn)
+
+	var stream inventoryV1.NodeRPC_StreamNodeClient
+
+	pub := fromctx.PubSubFromCtx(nd.ctx)
+
+	for {
+		for
stream == nil { + conn.Connect() + + if state := conn.GetState(); state != connectivity.Ready { + if !conn.WaitForStateChange(nd.ctx, connectivity.Ready) { + return + } + } + + // do not replace empty argument with nil. stream will panic + stream, err = client.StreamNode(nd.ctx, &emptypb.Empty{}) + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + + nd.log.Error(err, "couldn't establish stream") + + tctx, tcancel := context.WithTimeout(nd.ctx, 2*time.Second) + <-tctx.Done() + tcancel() + + if !errors.Is(tctx.Err(), context.DeadlineExceeded) { + return + } + } + } + + node, err := stream.Recv() + if err != nil { + pub.Pub(nodeState{ + state: nodeStateRemoved, + name: nd.nodeName, + }, []string{topicNodeState}) + + stream = nil + + if errors.Is(err, context.Canceled) { + return + } + + conn.ResetConnectBackoff() + } else { + pub.Pub(nodeState{ + state: nodeStateUpdated, + name: nd.nodeName, + node: node.Dup(), + }, []string{topicNodeState}) + } + } +} diff --git a/operator/inventory/feature-discovery-node.go b/operator/inventory/feature-discovery-node.go new file mode 100644 index 00000000..35f78742 --- /dev/null +++ b/operator/inventory/feature-discovery-node.go @@ -0,0 +1,915 @@ +package inventory + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/go-logr/logr" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/gpu" + "github.com/jaypipes/ghw/pkg/memory" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/troian/pubsub" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/reflection" + "google.golang.org/protobuf/types/known/emptypb" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/watch" + + v1 "github.com/akash-network/akash-api/go/inventory/v1" + + "github.com/akash-network/provider/cluster/kube/builder" + "github.com/akash-network/provider/tools/fromctx" +) + +const ( + fdContainerName = "inventory-node" + fdContainerRESTPortName = "rest" + fdContainerGRPCPortName = "grpc" + topicNode = "node" + topicNodeState = "node-state" + topicNodes = "nodes" + topicStorage = "storage" + topicConfig = "config" + topicClusterState = "cluster-state" +) + +type gpuDevice struct { + Name string `json:"name"` + Interface string `json:"interface"` + MemorySize string `json:"memory_size"` +} + +type gpuDevices map[string]gpuDevice + +type gpuVendor struct { + Name string `json:"name"` + Devices gpuDevices `json:"devices"` +} + +type gpuVendors map[string]gpuVendor + +type dpReqType int + +const ( + dpReqCPU dpReqType = iota + dpReqGPU + dpReqMem +) + +type dpReadResp struct { + data interface{} + err error +} +type dpReadReq struct { + op dpReqType + resp chan<- dpReadResp +} + +type debuggerPod struct { + ctx context.Context + readch chan dpReadReq +} + +type msgServiceServer struct { + v1.NodeRPCServer + ctx context.Context + log logr.Logger + sub pubsub.Subscriber + reqch chan<- chan<- v1.Node +} + +type fdNodeServer struct { + ctx context.Context + log logr.Logger + reqch <-chan chan<- v1.Node + pub pubsub.Publisher + nodeName string +} + +var ( + supportedGPUs = gpuVendors{} + + //go:embed gpu-info.json + gpuDevs embed.FS +) + +func init() { + f, err := gpuDevs.Open("gpu-info.json") + if err != nil { + panic(err) 
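+		// once unmarshalled below, supportedGPUs is keyed by PCI vendor ID, then device
+		// ID; a usage sketch (IDs taken from the embedded gpu-info.json in this change):
+		//
+		//	if vendor, ok := supportedGPUs["10de"]; ok {
+		//		dev := vendor.Devices["20b0"] // interface SXM4, memory_size 40Gi
+		//	}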
+	}
+	// close the embedded gpu-info.json file when done
+	defer func() {
+		_ = f.Close()
+	}()
+
+	data, err := io.ReadAll(f)
+	if err != nil {
+		panic(err)
+	}
+
+	err = json.Unmarshal(data, &supportedGPUs)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func cmdFeatureDiscoveryNode() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:          "node",
+		Short:        "feature discovery daemon-set node",
+		Args:         cobra.ExactArgs(0),
+		SilenceUsage: true,
+		PreRunE: func(cmd *cobra.Command, args []string) error {
+			kubecfg := fromctx.KubeConfigFromCtx(cmd.Context())
+
+			var hw hwInfo
+
+			log := fromctx.LogrFromCtx(cmd.Context())
+
+			if kubecfg.BearerTokenFile != "/var/run/secrets/kubernetes.io/serviceaccount/token" {
+				log.Info("service is not running as a kubernetes pod. starting debugger pod")
+
+				dp := &debuggerPod{
+					ctx:    cmd.Context(),
+					readch: make(chan dpReadReq, 1),
+				}
+
+				group := fromctx.ErrGroupFromCtx(cmd.Context())
+
+				startch := make(chan struct{})
+
+				group.Go(func() error {
+					return dp.run(startch)
+				})
+
+				ctx, cancel := context.WithTimeout(cmd.Context(), 5*time.Second)
+
+				select {
+				case <-ctx.Done():
+					if !errors.Is(ctx.Err(), context.DeadlineExceeded) {
+						return ctx.Err()
+					}
+				case <-startch:
+					cancel()
+				}
+
+				hw = dp
+			} else {
+				hw = &localHwReader{}
+			}
+
+			fromctx.CmdSetContextValue(cmd, CtxKeyHwInfo, hw)
+
+			return nil
+		},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			log := fromctx.LogrFromCtx(ctx)
+
+			log.Info("starting k8s node features discovery")
+
+			var err error
+
+			podName := viper.GetString(FlagPodName)
+			nodeName := viper.GetString(FlagNodeName)
+			grpcPort := viper.GetUint16(FlagGRPCPort)
+
+			kc := fromctx.KubeClientFromCtx(ctx)
+
+			if grpcPort == 0 {
+				// this is a dirty hack to discover the exposed api port when this service runs within kubernetes
+				podInfo, err := kc.CoreV1().Pods(daemonSetNamespace).Get(ctx, podName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, container := range podInfo.Spec.Containers {
+					if container.Name == fdContainerName {
+						for _, port := range container.Ports {
+							if port.Name == fdContainerGRPCPortName {
+								grpcPort = uint16(port.ContainerPort)
+							}
+						}
+					}
+				}
+
+				if grpcPort == 0 {
+					return fmt.Errorf("unable to detect pod's grpc port") // nolint: goerr113
+				}
+			}
+
+			bus := fromctx.PubSubFromCtx(cmd.Context())
+
+			grpcEndpoint := fmt.Sprintf(":%d", grpcPort)
+
+			grpcSrv := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+				MinTime:             30 * time.Second,
+				PermitWithoutStream: false,
+			}))
+
+			reqch := make(chan chan<- v1.Node, 1)
+
+			v1.RegisterNodeRPCServer(grpcSrv, &msgServiceServer{
+				ctx:   ctx,
+				log:   log.WithName("msg-srv"),
+				sub:   bus,
+				reqch: reqch,
+			})
+
+			reflection.Register(grpcSrv)
+
+			group := fromctx.ErrGroupFromCtx(ctx)
+
+			fdns := &fdNodeServer{
+				ctx:      ctx,
+				log:      log.WithName("watcher"),
+				reqch:    reqch,
+				pub:      bus,
+				nodeName: nodeName,
+			}
+
+			startch := make(chan struct{}, 1)
+			group.Go(func() error {
+				defer func() {
+					log.Info("node discovery stopped")
+				}()
+				return fdns.run(startch)
+			})
+
+			select {
+			case <-startch:
+				group.Go(func() error {
+					defer func() {
+						log.Info("grpc server stopped")
+					}()
+
+					log.Info(fmt.Sprintf("grpc listening on \"%s\"", grpcEndpoint))
+
+					lis, err := net.Listen("tcp", grpcEndpoint)
+					if err != nil {
+						return err
+					}
+
+					return grpcSrv.Serve(lis)
+				})
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+
+			group.Go(func() error {
+				<-ctx.Done()
+				log.Info("received shutdown signal")
+
+				grpcSrv.GracefulStop()
+
+				return ctx.Err()
+			})
+
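+			// startup handshake: fdns.run signals readiness on startch once its node and
+			// pod watchers are in place, which gates the gRPC listener above; the send
+			// below then reports the whole node service as started on the process-level
+			// startup channel.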
fromctx.StartupChFromCtx(ctx) <- struct{}{} + err = group.Wait() + + if !errors.Is(err, context.Canceled) { + return err + } + + return nil + }, + } + + cmd.Flags().String(FlagPodName, "", "instance name") + if err := viper.BindPFlag(FlagPodName, cmd.Flags().Lookup(FlagPodName)); err != nil { + panic(err) + } + + cmd.Flags().String(FlagNodeName, "", "node name") + if err := viper.BindPFlag(FlagNodeName, cmd.Flags().Lookup(FlagNodeName)); err != nil { + panic(err) + } + + return cmd +} + +func (nd *fdNodeServer) run(startch chan<- struct{}) error { + kc := fromctx.KubeClientFromCtx(nd.ctx) + + nodeWatch, err := kc.CoreV1().Nodes().Watch(nd.ctx, metav1.ListOptions{ + LabelSelector: builder.AkashManagedLabelName + "=true", + FieldSelector: fields.OneTermEqualSelector(metav1.ObjectNameField, nd.nodeName).String(), + }) + if err != nil { + nd.log.Error(err, fmt.Sprintf("unable to start node watcher for \"%s\"", nd.nodeName)) + return err + } + + defer nodeWatch.Stop() + + podsWatch, err := kc.CoreV1().Pods(corev1.NamespaceAll).Watch(nd.ctx, metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nd.nodeName).String(), + }) + if err != nil { + nd.log.Error(err, "unable to fetch pods") + return err + } + + defer podsWatch.Stop() + + node, initPods, err := initNodeInfo(nd.ctx, nd.nodeName) + if err != nil { + nd.log.Error(err, "unable to init node info") + return err + } + + select { + case <-nd.ctx.Done(): + return nd.ctx.Err() + case startch <- struct{}{}: + } + + signalch := make(chan struct{}, 1) + signalch <- struct{}{} + + trySignal := func() { + select { + case signalch <- struct{}{}: + default: + } + } + + trySignal() + + for { + select { + case <-nd.ctx.Done(): + return nd.ctx.Err() + case <-signalch: + nd.pub.Pub(node.Dup(), []string{topicNode}, pubsub.WithRetain()) + case req := <-nd.reqch: + req <- node.Dup() + case res := <-nodeWatch.ResultChan(): + obj := res.Object.(*corev1.Node) + switch res.Type { + case watch.Added: + fallthrough + case watch.Modified: + caps, _ := parseNodeCapabilities(obj.Annotations) + switch obj := caps.Capabilities.(type) { + case *CapabilitiesV1: + node.Capabilities.StorageClasses = obj.StorageClasses + default: + } + } + trySignal() + case res := <-podsWatch.ResultChan(): + obj := res.Object.(*corev1.Pod) + switch res.Type { + case watch.Added: + if _, exists := initPods[obj.Name]; exists { + delete(initPods, obj.Name) + } else { + for _, container := range obj.Spec.Containers { + addAllocatedResources(&node, container.Resources.Requests) + } + } + case watch.Deleted: + if _, exists := initPods[obj.Name]; exists { + delete(initPods, obj.Name) + } + + for _, container := range obj.Spec.Containers { + subAllocatedResources(&node, container.Resources.Requests) + } + } + + trySignal() + } + } +} + +func addAllocatedResources(node *v1.Node, rl corev1.ResourceList) { + for name, quantity := range rl { + switch name { + case corev1.ResourceCPU: + node.Resources.CPU.Quantity.Allocated.Add(quantity) + case corev1.ResourceMemory: + node.Resources.Memory.Quantity.Allocated.Add(quantity) + case corev1.ResourceEphemeralStorage: + node.Resources.EphemeralStorage.Allocated.Add(quantity) + case builder.ResourceGPUNvidia: + fallthrough + case builder.ResourceGPUAMD: + node.Resources.GPU.Quantity.Allocated.Add(quantity) + } + } +} + +func subAllocatedResources(node *v1.Node, rl corev1.ResourceList) { + for name, quantity := range rl { + switch name { + case corev1.ResourceCPU: + node.Resources.CPU.Quantity.Allocated.Sub(quantity) + case 
corev1.ResourceMemory:
+			node.Resources.Memory.Quantity.Allocated.Sub(quantity)
+		case corev1.ResourceEphemeralStorage:
+			node.Resources.EphemeralStorage.Allocated.Sub(quantity)
+		case builder.ResourceGPUNvidia:
+			fallthrough
+		case builder.ResourceGPUAMD:
+			node.Resources.GPU.Quantity.Allocated.Sub(quantity)
+		}
+	}
+}
+
+func initNodeInfo(ctx context.Context, name string) (v1.Node, map[string]corev1.Pod, error) {
+	kc := fromctx.KubeClientFromCtx(ctx)
+
+	cpuInfo, err := parseCPUInfo(ctx)
+	if err != nil {
+		return v1.Node{}, nil, err
+	}
+
+	gpuInfo, err := parseGPUInfo(ctx)
+	if err != nil {
+		return v1.Node{}, nil, err
+	}
+
+	knode, err := kc.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		return v1.Node{}, nil, fmt.Errorf("%w: error fetching node %s", err, name)
+	}
+
+	caps, err := parseNodeCapabilities(knode.Annotations)
+	if err != nil {
+		return v1.Node{}, nil, fmt.Errorf("%w: parsing capabilities for node %s", err, name)
+	}
+
+	res := v1.Node{
+		Name: knode.Name,
+		Resources: v1.NodeResources{
+			CPU: v1.CPU{
+				Quantity: v1.NewResourcePairMilli(0, 0, resource.DecimalSI),
+				Info:     cpuInfo,
+			},
+			GPU: v1.GPU{
+				Quantity: v1.NewResourcePair(0, 0, resource.DecimalSI),
+				Info:     gpuInfo,
+			},
+			Memory: v1.Memory{
+				Quantity: v1.NewResourcePair(0, 0, resource.DecimalSI),
+				Info:     nil,
+			},
+			EphemeralStorage: v1.NewResourcePair(0, 0, resource.DecimalSI),
+			VolumesAttached:  v1.NewResourcePair(0, 0, resource.DecimalSI),
+			VolumesMounted:   v1.NewResourcePair(0, 0, resource.DecimalSI),
+		},
+	}
+
+	switch obj := caps.Capabilities.(type) {
+	case *CapabilitiesV1:
+		res.Capabilities.StorageClasses = obj.StorageClasses
+	default:
+	}
+
+	for name, r := range knode.Status.Allocatable {
+		switch name {
+		case corev1.ResourceCPU:
+			res.Resources.CPU.Quantity.Allocatable.SetMilli(r.MilliValue())
+		case corev1.ResourceMemory:
+			res.Resources.Memory.Quantity.Allocatable.Set(r.Value())
+		case corev1.ResourceEphemeralStorage:
+			res.Resources.EphemeralStorage.Allocatable.Set(r.Value())
+		case builder.ResourceGPUNvidia:
+			// nvidia and amd GPUs contribute to the same allocatable quantity
+			fallthrough
+		case builder.ResourceGPUAMD:
+			res.Resources.GPU.Quantity.Allocatable.Set(r.Value())
+		}
+	}
+
+	initPods := make(map[string]corev1.Pod)
+
+	podsList, err := kc.CoreV1().Pods(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", name).String(),
+	})
+	if err != nil {
+		return res, nil, err
+	}
+
+	for _, pod := range podsList.Items {
+		for _, container := range pod.Spec.Containers {
+			addAllocatedResources(&res, container.Resources.Requests)
+		}
+		initPods[pod.Name] = pod
+	}
+
+	return res, initPods, nil
+}
+
+func (s *msgServiceServer) QueryNode(ctx context.Context, _ *emptypb.Empty) (*v1.Node, error) {
+	reqch := make(chan v1.Node, 1)
+
+	select {
+	case <-s.ctx.Done():
+		return nil, s.ctx.Err()
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case s.reqch <- reqch:
+	}
+
+	select {
+	case <-s.ctx.Done():
+		return nil, s.ctx.Err()
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case req := <-reqch:
+		return &req, nil
+	}
+}
+
+func (s *msgServiceServer) StreamNode(_ *emptypb.Empty, stream v1.NodeRPC_StreamNodeServer) error {
+	subch := s.sub.Sub(topicNode)
+
+	defer func() {
+		s.sub.Unsub(subch, topicNode)
+	}()
+
+	for {
+		select {
+		case <-s.ctx.Done():
+			return s.ctx.Err()
+		case <-stream.Context().Done():
+			return stream.Context().Err()
+		case nd := <-subch:
+			switch msg := nd.(type) {
+			case v1.Node:
+				if err := stream.Send(&msg); err != nil {
+					return err
+				}
+			default:
+			}
+		}
+	}
+}
+
+type hwInfo interface {
+	CPU(context.Context) (*cpu.Info, error)
+	GPU(context.Context) (*gpu.Info, error)
+	Memory(context.Context) (*memory.Info, error)
+}
+
+type localHwReader struct{}
+
+func (lfs *localHwReader) CPU(_ context.Context) (*cpu.Info, error) {
+	return cpu.New()
+}
+
+func (lfs *localHwReader) GPU(_ context.Context) (*gpu.Info, error) {
+	return gpu.New()
+}
+
+func (lfs *localHwReader) Memory(_ context.Context) (*memory.Info, error) {
+	return memory.New()
+}
+
+func parseCPUInfo(ctx context.Context) (v1.CPUInfoS, error) {
+	if err := ctx.Err(); err != nil {
+		return nil, err
+	}
+
+	hw := HWInfoFromCtx(ctx)
+
+	cpus, err := hw.CPU(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make(v1.CPUInfoS, 0, len(cpus.Processors))
+
+	for _, c := range cpus.Processors {
+		res = append(res, v1.CPUInfo{
+			ID:     strconv.Itoa(c.ID),
+			Vendor: c.Vendor,
+			Model:  c.Model,
+			Vcores: c.NumThreads,
+		})
+	}
+
+	return res, nil
+}
+
+func parseGPUInfo(ctx context.Context) (v1.GPUInfoS, error) {
+	if err := ctx.Err(); err != nil {
+		return nil, err
+	}
+
+	hw := HWInfoFromCtx(ctx)
+
+	gpus, err := hw.GPU(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make(v1.GPUInfoS, 0)
+
+	for _, dev := range gpus.GraphicsCards {
+		vendor, exists := supportedGPUs[dev.DeviceInfo.Vendor.ID]
+		if !exists {
+			continue
+		}
+
+		model, exists := vendor.Devices[dev.DeviceInfo.Product.ID]
+		if !exists {
+			continue
+		}
+
+		res = append(res, v1.GPUInfo{
+			Vendor:     dev.DeviceInfo.Vendor.Name,
+			VendorID:   dev.DeviceInfo.Vendor.ID,
+			Name:       dev.DeviceInfo.Product.Name,
+			ModelID:    dev.DeviceInfo.Product.ID,
+			Interface:  model.Interface,
+			MemorySize: model.MemorySize,
+		})
+	}
+
+	return res, nil
+}
+
+func (dp *debuggerPod) CPU(ctx context.Context) (*cpu.Info, error) {
+	respch := make(chan dpReadResp, 1)
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case dp.readch <- dpReadReq{
+		op:   dpReqCPU,
+		resp: respch,
+	}:
+	}
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case resp := <-respch:
+		return resp.data.(*cpu.Info), resp.err
+	}
+}
+
+func (dp *debuggerPod) GPU(ctx context.Context) (*gpu.Info, error) {
+	respch := make(chan dpReadResp, 1)
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case dp.readch <- dpReadReq{
+		op:   dpReqGPU,
+		resp: respch,
+	}:
+	}
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case resp := <-respch:
+		return resp.data.(*gpu.Info), resp.err
+	}
+}
+
+func (dp *debuggerPod) Memory(ctx context.Context) (*memory.Info, error) {
+	respch := make(chan dpReadResp, 1)
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case dp.readch <- dpReadReq{
+		op:   dpReqMem,
+		resp: respch,
+	}:
+	}
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-dp.ctx.Done():
+		return nil, dp.ctx.Err()
+	case resp := <-respch:
+		return resp.data.(*memory.Info), resp.err
+	}
+}
+
+func (dp *debuggerPod) run(startch chan<- struct{}) error {
+	log := fromctx.LogrFromCtx(dp.ctx)
+
+	log.Info("starting debugger pod")
+
+	req := &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "fd-debugger-pod",
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name: "psutil",
+					Image:
"ghcr.io/akash-network/provider-test:latest-arm64", + Command: []string{ + "provider-services", + "operator", + "psutil", + "serve", + "--api-port=8081", + }, + Ports: []corev1.ContainerPort{ + { + Name: "api", + ContainerPort: 8081, + }, + }, + }, + }, + }, + } + + kc := fromctx.KubeClientFromCtx(dp.ctx) + + pod, err := kc.CoreV1().Pods(daemonSetNamespace).Create(dp.ctx, req, metav1.CreateOptions{}) + if err != nil && !kerrors.IsAlreadyExists(err) { + return err + } + + defer func() { + // using default context here to delete pod as main might have been canceled + _ = kc.CoreV1().Pods(daemonSetNamespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) + }() + + watcher, err := kc.CoreV1().Pods(daemonSetNamespace).Watch(dp.ctx, metav1.ListOptions{ + Watch: true, + ResourceVersion: pod.ResourceVersion, + FieldSelector: fields.Set{"metadata.name": pod.Name}.AsSelector().String(), + LabelSelector: labels.Everything().String(), + }) + + if err != nil { + return err + } + + defer func() { + watcher.Stop() + }() + + var apiPort int32 + + for _, container := range pod.Spec.Containers { + if container.Name == "psutil" { + for _, port := range container.Ports { + if port.Name == "api" { + apiPort = port.ContainerPort + } + } + } + } + + if apiPort == 0 { + return fmt.Errorf("debugger pod does not have port named \"api\"") // nolint: goerr113 + } + +initloop: + for { + select { + case <-dp.ctx.Done(): + return dp.ctx.Err() + case evt := <-watcher.ResultChan(): + resp := evt.Object.(*corev1.Pod) + if resp.Status.Phase != corev1.PodPending { + watcher.Stop() + startch <- struct{}{} + break initloop + } + } + } + + for { + select { + case <-dp.ctx.Done(): + return dp.ctx.Err() + case readreq := <-dp.readch: + var res string + resp := dpReadResp{} + + switch readreq.op { + case dpReqCPU: + res = "cpu" + case dpReqGPU: + res = "gpu" + case dpReqMem: + res = "memory" + } + + result := kc.CoreV1().RESTClient().Get(). + Namespace(daemonSetNamespace). + Resource("pods"). + Name(fmt.Sprintf("%s:%d", pod.Name, apiPort)). + SubResource("proxy"). + Suffix(res). + Do(dp.ctx) + + resp.err = result.Error() + + if resp.err == nil { + var data []byte + data, resp.err = result.Raw() + if resp.err == nil { + switch readreq.op { + case dpReqCPU: + var res cpu.Info + resp.err = json.Unmarshal(data, &res) + resp.data = &res + case dpReqGPU: + var res gpu.Info + resp.err = json.Unmarshal(data, &res) + resp.data = &res + case dpReqMem: + var res memory.Info + resp.err = json.Unmarshal(data, &res) + resp.data = &res + } + } + } + + readreq.resp <- resp + } + } +} + +// // ExecCmd exec command on specific pod and wait the command's output. +// func ExecCmd(ctx context.Context, podName string, command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { +// kc := KubeClientFromCtx(ctx) +// cfg := KubeConfigFromCtx(ctx) +// +// cmd := []string{ +// "sh", +// "-c", +// command, +// } +// +// option := &corev1.PodExecOptions{ +// Command: cmd, +// Stdin: true, +// Stdout: true, +// Stderr: true, +// TTY: true, +// } +// if stdin == nil { +// option.Stdin = false +// } +// +// req := kc.CoreV1(). +// RESTClient(). +// Post(). +// Resource("pods"). +// Name(podName). +// Namespace(daemonSetNamespace). +// SubResource("exec"). 
+// VersionedParams(option, scheme.ParameterCodec) +// +// exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL()) +// if err != nil { +// return err +// } +// err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ +// Stdin: stdin, +// Stdout: stdout, +// Stderr: stderr, +// }) +// if err != nil { +// return err +// } +// +// return nil +// } diff --git a/operator/inventory/featureDiscoveryClient.go b/operator/inventory/featureDiscoveryClient.go deleted file mode 100644 index 93b5769c..00000000 --- a/operator/inventory/featureDiscoveryClient.go +++ /dev/null @@ -1,369 +0,0 @@ -package inventory - -import ( - "context" - "encoding/json" - "fmt" - "log" - "net/http" - "sync" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "google.golang.org/grpc" - - "github.com/gorilla/mux" - - v1 "github.com/akash-network/akash-api/go/inventory/v1" -) - -const ( - daemonSetLabelSelector = "app=hostfeaturediscovery" - daemonSetNamespace = "akash-services" - grpcPort = ":50051" - nodeUpdateInterval = 5 * time.Second // Duration after which to print the cluster state - Added = "ADDED" - Deleted = "DELETED" -) - -var instance *ConcurrentClusterData -var once sync.Once - -// ConcurrentClusterData provides a concurrency-safe way to store and update cluster data. -type ConcurrentClusterData struct { - sync.RWMutex - cluster *v1.Cluster - podNodeMap map[string]int // Map of pod UID to node index in the cluster.Nodes slice -} - -// NewConcurrentClusterData initializes a new instance of ConcurrentClusterData with empty cluster data. -func NewConcurrentClusterData() *ConcurrentClusterData { - return &ConcurrentClusterData{ - cluster: &v1.Cluster{Nodes: []v1.Node{}}, - podNodeMap: make(map[string]int), - } -} - -// UpdateNode updates or adds the node to the cluster data. -func (ccd *ConcurrentClusterData) UpdateNode(podUID string, node *v1.Node) { - ccd.Lock() - defer ccd.Unlock() - - if nodeIndex, ok := ccd.podNodeMap[podUID]; ok { - // Node exists, update it - ccd.cluster.Nodes[nodeIndex] = *node - } else { - // Node does not exist, add it - ccd.cluster.Nodes = append(ccd.cluster.Nodes, *node) - ccd.podNodeMap[podUID] = len(ccd.cluster.Nodes) - 1 - } -} - -func (ccd *ConcurrentClusterData) RemoveNode(podUID string) { - ccd.Lock() - defer ccd.Unlock() - - if nodeIndex, ok := ccd.podNodeMap[podUID]; ok { - // Remove the node from the slice - ccd.cluster.Nodes = append(ccd.cluster.Nodes[:nodeIndex], ccd.cluster.Nodes[nodeIndex+1:]...) - delete(ccd.podNodeMap, podUID) // Remove the entry from the map - - // Update the indices in the map - for podUID, index := range ccd.podNodeMap { - if index > nodeIndex { - ccd.podNodeMap[podUID] = index - 1 - } - } - } -} - -// Helper function to perform a deep copy of the Cluster struct. 
-func deepCopy(cluster *v1.Cluster) *v1.Cluster { - if cluster == nil { - return nil - } - - if len(cluster.Nodes) == 0 { - // Log a warning instead of returning an error - log.Printf("Warning: Attempting to deep copy a cluster with an empty Nodes slice") - } - - // Create a new Cluster instance - copied := &v1.Cluster{} - - // Deep copy each field from the original Cluster to the new instance - // Deep copy the Nodes slice - copied.Nodes = make([]v1.Node, len(cluster.Nodes)) - for i, node := range cluster.Nodes { - // Assuming Node is a struct, create a copy - // If Node contains slices or maps, this process needs to be recursive - copiedNode := node // This is a shallow copy, adjust as needed - copied.Nodes[i] = copiedNode - } - - return copied -} - -func watchPods(clientset *kubernetes.Clientset, stopCh <-chan struct{}, clusterData *ConcurrentClusterData) error { - errCh := make(chan error, 1) // Buffered error channel - var wg sync.WaitGroup // WaitGroup to track goroutines - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - watcher, err := clientset.CoreV1().Pods(daemonSetNamespace).Watch(ctx, metav1.ListOptions{ - LabelSelector: daemonSetLabelSelector, - }) - - if err != nil { - return fmt.Errorf("error setting up Kubernetes watcher: %w", err) - } - - defer watcher.Stop() - - for { - select { - case <-stopCh: - log.Println("Stopping pod watcher") - wg.Wait() // Wait for all goroutines to finish - close(errCh) // Close the error channel - return nil - case err := <-errCh: - log.Printf("Error in goroutine: %v", err) - // Additional error handling logic can be placed here - case event, ok := <-watcher.ResultChan(): - if !ok { - wg.Wait() // Wait for all goroutines to finish - close(errCh) // Close the error channel - return fmt.Errorf("watcher channel closed unexpectedly") - } - - pod, ok := event.Object.(*corev1.Pod) - if !ok { - log.Println("Unexpected type in watcher event") - continue - } - - switch event.Type { - case Added: - if pod.Status.Phase == corev1.PodRunning && pod.Status.PodIP != "" { - wg.Add(1) - go func() { - defer wg.Done() - if err := connectToGrpcStream(pod, clusterData); err != nil { - errCh <- err - } - }() - } else { - wg.Add(1) - go func() { - defer wg.Done() - if err := waitForPodReadyAndConnect(clientset, pod, clusterData); err != nil { - errCh <- err - } - }() - } - log.Printf("Pod added: %s, UID: %s\n", pod.Name, pod.UID) - case Deleted: - clusterData.RemoveNode(string(pod.UID)) - log.Printf("Pod deleted: %s, UID: %s\n", pod.Name, pod.UID) - } - } - } -} - -// waitForPodReadyAndConnect waits for a pod to become ready before attempting to connect to its gRPC stream -func waitForPodReadyAndConnect(clientset *kubernetes.Clientset, pod *corev1.Pod, clusterData *ConcurrentClusterData) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) // 10-minute timeout - defer cancel() - - ticker := time.NewTicker(2 * time.Second) // Polling interval - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for pod %s to become ready", pod.Name) - - case <-ticker.C: - currentPod, err := clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("error getting pod status: %w", err) - } - - if currentPod.Status.Phase == corev1.PodRunning && currentPod.Status.PodIP != "" { - // Handle the error returned by connectToGrpcStream - if err := connectToGrpcStream(currentPod, clusterData); err != nil { - return 
fmt.Errorf("error connecting to gRPC stream for pod %s: %w", pod.Name, err) - } - return nil - } - } - } -} - -func connectToGrpcStream(pod *corev1.Pod, clusterData *ConcurrentClusterData) error { - ipAddress := fmt.Sprintf("%s%s", pod.Status.PodIP, grpcPort) - fmt.Println("Connecting to:", ipAddress) - - // Establish the gRPC connection - conn, err := grpc.Dial(ipAddress, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - return fmt.Errorf("failed to connect to pod IP %s: %v", pod.Status.PodIP, err) - } - defer conn.Close() - - client := v1.NewMsgClient(conn) - - // Create a stream to receive updates from the node - stream, err := client.QueryNode(context.Background(), &v1.VoidNoParam{}) - if err != nil { - return fmt.Errorf("could not query node for pod IP %s: %v", pod.Status.PodIP, err) - } - - for { - node, err := stream.Recv() - if err != nil { - // Handle stream error and remove the node - clusterData.RemoveNode(string(pod.UID)) - return fmt.Errorf("stream closed for pod UID %s: %v", pod.UID, err) - } - - // Update the node information in the cluster data - clusterData.UpdateNode(string(pod.UID), node) - } -} - -func printCluster() { - // Retrieve a deep copy of the current cluster state - cluster := GetCurrentClusterState() - - // If no nodes to print, just return - if len(cluster.Nodes) == 0 { - fmt.Println("No nodes in the cluster.") - return - } - - // Print the cluster state - jsonCluster, err := json.Marshal(cluster) - if err != nil { - log.Fatalf("error marshaling cluster struct into JSON: %v", err) - } - - fmt.Println(string(jsonCluster)) -} - -func FeatureDiscovery(ctx context.Context) error { - fmt.Println("Starting up gRPC client...") - - // Use in-cluster configuration - config, err := rest.InClusterConfig() - if err != nil { - return fmt.Errorf("error obtaining in-cluster config: %v", err) - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("error creating Kubernetes client: %v", err) - } - - clusterData := GetInstance() - - var wg sync.WaitGroup - - // Start the watcher in a goroutine with error handling - errCh := make(chan error, 1) - stopCh := make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - defer close(errCh) - if err := watchPods(clientset, stopCh, clusterData); err != nil { - errCh <- err - } - }() - - // Error handling goroutine - go func() { - for err := range errCh { - // Log errors but don't exit - log.Printf("Error from watchPods: %v", err) - } - }() - - // Start a ticker to periodically check/print the cluster state - ticker := time.NewTicker(nodeUpdateInterval) - go func() { - for { - select { - case <-ticker.C: - printCluster() - case <-ctx.Done(): - // Context canceled, cleanup and exit - ticker.Stop() - return - } - } - }() - - // API endpoint which serves feature discovery data to Akash Provider - router := mux.NewRouter() - router.HandleFunc("/getClusterState", getClusterStateHandler).Methods("GET") - - // Use a separate goroutine for HTTP server - httpErrCh := make(chan error, 1) - go func() { - httpErrCh <- http.ListenAndServe(":8081", router) - }() - - // Wait for all goroutines to finish or for context cancellation - select { - case err := <-httpErrCh: - return fmt.Errorf("HTTP server error: %v", err) - case err := <-errCh: - return err - case <-ctx.Done(): - close(stopCh) - wg.Wait() - return ctx.Err() - } -} - -// GetInstance returns the singleton instance of ConcurrentClusterData. 
-func GetInstance() *ConcurrentClusterData { - once.Do(func() { - log.Println("Initializing ConcurrentClusterData instance") - instance = &ConcurrentClusterData{ - cluster: &v1.Cluster{Nodes: []v1.Node{}}, - podNodeMap: make(map[string]int), - } - }) - return instance -} - -// GetCurrentClusterState returns a deep copy of the current state of the cluster and is used primarily for API GET data -func GetCurrentClusterState() *v1.Cluster { - // Use the singleton instance to get the cluster - clusterData := GetInstance() - - // Return a deep copy of the cluster - return deepCopy(clusterData.cluster) -} - -func getClusterStateHandler(w http.ResponseWriter, r *http.Request) { - clusterState := GetCurrentClusterState() - - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(clusterState); err != nil { - // Log the error - log.Printf("Error encoding response: %v", err) - - // Write an error response - http.Error(w, fmt.Sprintf("{\"error\": \"Internal Server Error: %v\"}", err), http.StatusInternalServerError) - return - } -} diff --git a/operator/inventory/gpu-info.json b/operator/inventory/gpu-info.json new file mode 100644 index 00000000..eded4523 --- /dev/null +++ b/operator/inventory/gpu-info.json @@ -0,0 +1,37 @@ +{ + "10de":{ + "name": "nvidia", + "devices": { + "20b0": { + "name": "", + "interface": "SXM4", + "memory_size": "40Gi" + }, + "20b1": { + "name": "", + "interface": "PCIe", + "memory_size": "40Gi" + }, + "20b2": { + "name": "", + "interface": "SXM4", + "memory_size": "80Gi" + }, + "20b3": { + "name": "", + "interface": "SXM", + "memory_size": "64Gi" + }, + "20b5": { + "name": "", + "interface": "PCIe", + "memory_size": "80Gi" + }, + "20f1": { + "name": "", + "interface": "PCIe", + "memory_size": "40Gi" + } + } + } +} diff --git a/operator/inventory/rancher.go b/operator/inventory/rancher.go index c2c7d617..11da58af 100644 --- a/operator/inventory/rancher.go +++ b/operator/inventory/rancher.go @@ -8,20 +8,23 @@ import ( "strconv" "strings" - "github.com/pkg/errors" + "github.com/troian/pubsub" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" - akashv2beta2 "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" + inventory "github.com/akash-network/akash-api/go/inventory/v1" + + "github.com/akash-network/provider/cluster/kube/builder" + "github.com/akash-network/provider/tools/fromctx" ) type rancher struct { exe RemotePodCommandExecutor ctx context.Context cancel context.CancelFunc - querier } type rancherStorage struct { @@ -32,61 +35,127 @@ type rancherStorage struct { type rancherStorageClasses map[string]*rancherStorage -func NewRancher(ctx context.Context) (Storage, error) { +func NewRancher(ctx context.Context) (QuerierStorage, error) { ctx, cancel := context.WithCancel(ctx) r := &rancher{ - exe: NewRemotePodCommandExecutor(KubeConfigFromCtx(ctx), KubeClientFromCtx(ctx)), - ctx: ctx, - cancel: cancel, - querier: newQuerier(), + exe: NewRemotePodCommandExecutor(fromctx.KubeConfigFromCtx(ctx), fromctx.KubeClientFromCtx(ctx)), + ctx: ctx, + cancel: cancel, } - group := ErrGroupFromCtx(ctx) - group.Go(r.run) + startch := make(chan struct{}, 1) + + group := fromctx.ErrGroupFromCtx(ctx) + group.Go(func() error { + return r.run(startch) + }) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-startch: + } return r, nil } -func (c *rancher) run() error { +func (c *rancher) run(startch chan<- 
struct{}) error { defer func() { c.cancel() }() - events := make(chan interface{}, 1000) + bus := fromctx.PubSubFromCtx(c.ctx) - pubsub := PubSubFromCtx(c.ctx) + var unsubChs []<-chan interface{} - defer pubsub.Unsub(events) - pubsub.AddSub(events, "ns", "sc", "pv", "nodes") + var pvWatcher watch.Interface + var pvEvents <-chan watch.Event - log := LogFromCtx(c.ctx).WithName("rancher") + defer func() { + if pvWatcher != nil { + pvWatcher.Stop() + } + + for _, ch := range unsubChs { + bus.Unsub(ch) + } + }() + + events := bus.Sub("ns", "sc", "nodes") + + log := fromctx.LogrFromCtx(c.ctx).WithName("rancher") scs := make(rancherStorageClasses) - resources := akashv2beta2.ResourcePair{ - Allocatable: math.MaxUint64, - } + allocatable := int64(math.MaxInt64) + + pvMap := make(map[string]corev1.PersistentVolume) - scSynced := false - pvSynced := false + scrapeCh := make(chan struct{}, 1) + scrapech := scrapeCh - pvCount := 0 + startch <- struct{}{} - var pendingPVs []watch.Event + tryScrape := func() { + select { + case scrapech <- struct{}{}: + default: + } + } for { select { case <-c.ctx.Done(): return c.ctx.Err() - case rawEvt := <-events: - if scSynced && len(pendingPVs) > 0 { - select { - case events <- pendingPVs[0]: - pendingPVs = pendingPVs[1:] - default: + case evt := <-pvEvents: + // nolint: gocritic + switch obj := evt.Object.(type) { + case *corev1.PersistentVolume: + switch evt.Type { + case watch.Added: + fallthrough + case watch.Modified: + res, exists := obj.Spec.Capacity[corev1.ResourceStorage] + if !exists { + break + } + + params, exists := scs[obj.Spec.StorageClassName] + if !exists { + scItem, _ := fromctx.KubeClientFromCtx(c.ctx).StorageV1().StorageClasses().Get(c.ctx, obj.Spec.StorageClassName, metav1.GetOptions{}) + + lblVal := scItem.Labels[builder.AkashManagedLabelName] + if lblVal == "" { + lblVal = falseVal + } + + params = &rancherStorage{ + isRancher: scItem.Provisioner == "rancher.io/local-path", + } + + params.isAkashManaged, _ = strconv.ParseBool(lblVal) + + scs[obj.Spec.StorageClassName] = params + } + + if _, exists = pvMap[obj.Name]; !exists { + pvMap[obj.Name] = *obj + params.allocated += uint64(res.Value()) + } + case watch.Deleted: + res, exists := obj.Spec.Capacity[corev1.ResourceStorage] + if !exists { + break + } + + delete(pvMap, obj.Name) + scs[obj.Spec.StorageClassName].allocated -= uint64(res.Value()) } + + tryScrape() } + case rawEvt := <-events: switch evt := rawEvt.(type) { case watch.Event: kind := reflect.TypeOf(evt.Object).String() @@ -98,17 +167,17 @@ func (c *rancher) run() error { evtdone: switch obj := evt.Object.(type) { case *corev1.Node: - if allocatable, exists := obj.Status.Allocatable[corev1.ResourceEphemeralStorage]; exists { - resources.Allocatable = uint64(allocatable.Value()) + if val, exists := obj.Status.Allocatable[corev1.ResourceEphemeralStorage]; exists { + allocatable = val.Value() } case *storagev1.StorageClass: switch evt.Type { case watch.Added: fallthrough case watch.Modified: - lblVal := obj.Labels["akash.network"] + lblVal := obj.Labels[builder.AkashManagedLabelName] if lblVal == "" { - lblVal = "false" + lblVal = falseVal } sc, exists := scs[obj.Name] @@ -120,99 +189,68 @@ func (c *rancher) run() error { sc.isAkashManaged, _ = strconv.ParseBool(lblVal) scs[obj.Name] = sc - scList, _ := KubeClientFromCtx(c.ctx).StorageV1().StorageClasses().List(c.ctx, metav1.ListOptions{}) - if len(scList.Items) == len(scs) && !scSynced { - scSynced = true - - if len(pendingPVs) > 0 { - select { - case events <- pendingPVs[0]: - 
pendingPVs = pendingPVs[1:] - default: - } + scList, _ := fromctx.KubeClientFromCtx(c.ctx).StorageV1().StorageClasses().List(c.ctx, metav1.ListOptions{}) + if len(scList.Items) == len(scs) && pvWatcher == nil { + var err error + pvWatcher, err = fromctx.KubeClientFromCtx(c.ctx).CoreV1().PersistentVolumes().Watch(c.ctx, metav1.ListOptions{}) + if err != nil { + log.Error(err, "couldn't start watcher on persistent volumes") } + pvEvents = pvWatcher.ResultChan() - pvList, _ := KubeClientFromCtx(c.ctx).CoreV1().PersistentVolumes().List(c.ctx, metav1.ListOptions{}) - if len(pvList.Items) == pvCount && !pvSynced { - pvSynced = true + pvList, err := fromctx.KubeClientFromCtx(c.ctx).CoreV1().PersistentVolumes().List(c.ctx, metav1.ListOptions{}) + if err != nil { + log.Error(err, "couldn't list persistent volumes") } - } - case watch.Deleted: - // volumes can remain without storage class so to keep metrics right when storage class suddenly - // recreated we don't delete it - default: - break evtdone - } - - log.Info(msg, "name", obj.Name) - case *corev1.PersistentVolume: - if !scSynced { - pendingPVs = append(pendingPVs, evt) - break evtdone - } - switch evt.Type { - case watch.Added: - pvCount++ - fallthrough - case watch.Modified: - resource, exists := obj.Spec.Capacity[corev1.ResourceStorage] - if !exists { - break - } + for _, pv := range pvList.Items { + capacity, exists := pv.Spec.Capacity[corev1.ResourceStorage] + if !exists { + continue + } - if params, exists := scs[obj.Spec.StorageClassName]; !exists { - scSynced = false - pendingPVs = append(pendingPVs, evt) - break evtdone - } else { - params.allocated += uint64(resource.Value()) + params := scs[pv.Spec.StorageClassName] + params.allocated += uint64(capacity.Value()) - pvList, _ := KubeClientFromCtx(c.ctx).CoreV1().PersistentVolumes().List(c.ctx, metav1.ListOptions{}) - if len(pvList.Items) == pvCount && !pvSynced { - pvSynced = true + pvMap[pv.Name] = pv } } - case watch.Deleted: - pvCount-- - resource, exists := obj.Spec.Capacity[corev1.ResourceStorage] - if !exists { - break - } - scs[obj.Spec.StorageClassName].allocated -= uint64(resource.Value()) + case watch.Deleted: + // volumes can remain without storage class so to keep metrics right when storage class suddenly + // recreated we don't delete it default: break evtdone } + log.Info(msg, "name", obj.Name) default: break evtdone } } - case req := <-c.reqch: - var resp resp - - if pvSynced { - var res []akashv2beta2.InventoryClusterStorage - - for class, params := range scs { - if params.isRancher && params.isAkashManaged { - res = append(res, akashv2beta2.InventoryClusterStorage{ + tryScrape() + case <-scrapech: + var res inventory.ClusterStorage + + for class, params := range scs { + if params.isRancher && params.isAkashManaged { + res = append(res, inventory.Storage{ + Quantity: inventory.NewResourcePair(allocatable, int64(params.allocated), resource.DecimalSI), + Info: inventory.StorageInfo{ Class: class, - ResourcePair: akashv2beta2.ResourcePair{ - Allocated: params.allocated, - Allocatable: resources.Allocatable, - }, - }) - } + }, + }) } + } - resp.res = res - } else { - resp.err = errors.New("rancher inventory is being updated") + if len(res) > 0 { + bus.Pub(storageSignal{ + driver: "rancher", + storage: res, + }, []string{topicStorage}, pubsub.WithRetain()) } - req.respCh <- resp + scrapech = scrapeCh } } } diff --git a/operator/inventory/state.go b/operator/inventory/state.go new file mode 100644 index 00000000..7930141a --- /dev/null +++ b/operator/inventory/state.go @@ 
-0,0 +1,82 @@ +package inventory + +import ( + "context" + + "github.com/troian/pubsub" + + inventory "github.com/akash-network/akash-api/go/inventory/v1" + + "github.com/akash-network/provider/tools/fromctx" +) + +type clusterState struct { + ctx context.Context + querierCluster +} + +func (s *clusterState) run() error { + bus := fromctx.PubSubFromCtx(s.ctx) + + storage := make(map[string]inventory.ClusterStorage) + + state := inventory.Cluster{} + signalch := make(chan struct{}, 1) + + datach := bus.Sub(topicNodes, topicStorage, topicConfig) + + defer bus.Unsub(datach) + + var cfg Config + + trySignal := func() { + select { + case signalch <- struct{}{}: + default: + } + } + + for { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case data := <-datach: + switch obj := data.(type) { + case Config: + cfg = obj + case inventory.Nodes: + state.Nodes = obj + trySignal() + case storageSignal: + storage[obj.driver] = obj.storage + + prealloc := 0 + for _, drv := range storage { + prealloc += len(drv) + } + + state.Storage = make(inventory.ClusterStorage, 0, prealloc) + + for _, drv := range storage { + for _, class := range drv { + if !cfg.HasStorageClass(class.Info.Class) { + continue + } + + state.Storage = append(state.Storage, class) + } + } + + trySignal() + default: + } + case req := <-s.reqch: + req.respCh <- respCluster{ + res: *state.Dup(), + err: nil, + } + case <-signalch: + bus.Pub(*state.Dup(), []string{topicClusterState}, pubsub.WithRetain()) + } + } +} diff --git a/operator/inventory/types.go b/operator/inventory/types.go index 33ef48b9..798e36f6 100644 --- a/operator/inventory/types.go +++ b/operator/inventory/types.go @@ -3,7 +3,7 @@ package inventory import ( "context" - "github.com/cskr/pubsub" + "github.com/troian/pubsub" rookexec "github.com/rook/rook/pkg/util/exec" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -13,52 +13,86 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - providerflags "github.com/akash-network/provider/cmd/provider-services/cmd/flags" - akashv2beta1 "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" + inventory "github.com/akash-network/akash-api/go/inventory/v1" + + "github.com/akash-network/provider/tools/fromctx" ) const ( FlagAPITimeout = "api-timeout" FlagQueryTimeout = "query-timeout" - FlagAPIPort = "api-port" + FlagRESTPort = "rest-port" + FlagGRPCPort = "grpc-port" + FlagPodName = "pod-name" + FlagNodeName = "node-name" + FlagConfig = "config" ) -type ContextKey string +type storageSignal struct { + driver string + storage inventory.ClusterStorage +} -const ( - CtxKeyKubeConfig = ContextKey(providerflags.FlagKubeConfig) - CtxKeyKubeClientSet = ContextKey("kube-clientset") - CtxKeyRookClientSet = ContextKey("rook-clientset") - CtxKeyAkashClientSet = ContextKey("akash-clientset") - CtxKeyPubSub = ContextKey("pubsub") - CtxKeyLifecycle = ContextKey("lifecycle") - CtxKeyErrGroup = ContextKey("errgroup") - CtxKeyStorage = ContextKey("storage") - CtxKeyInformersFactory = ContextKey("informers-factory") -) +type respNodes struct { + res inventory.Nodes + err error +} -type resp struct { - res []akashv2beta1.InventoryClusterStorage +type respCluster struct { + res inventory.Cluster err error } -type req struct { - respCh chan resp +type reqCluster struct { + respCh chan respCluster +} + +type reqNodes struct { + respCh chan respNodes } -type querier struct { - reqch chan req +type querierNodes struct { + reqch chan reqNodes } -func newQuerier() querier { - return querier{ - reqch: make(chan req, 100), +type 
diff --git a/operator/inventory/types.go b/operator/inventory/types.go
index 33ef48b9..798e36f6 100644
--- a/operator/inventory/types.go
+++ b/operator/inventory/types.go
@@ -3,7 +3,7 @@ package inventory
 import (
     "context"
 
-    "github.com/cskr/pubsub"
+    "github.com/troian/pubsub"
 
     rookexec "github.com/rook/rook/pkg/util/exec"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,52 +13,86 @@ import (
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
 
-    providerflags "github.com/akash-network/provider/cmd/provider-services/cmd/flags"
-    akashv2beta1 "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2"
+    inventory "github.com/akash-network/akash-api/go/inventory/v1"
+
+    "github.com/akash-network/provider/tools/fromctx"
 )
 
 const (
     FlagAPITimeout   = "api-timeout"
     FlagQueryTimeout = "query-timeout"
-    FlagAPIPort      = "api-port"
+    FlagRESTPort     = "rest-port"
+    FlagGRPCPort     = "grpc-port"
+    FlagPodName      = "pod-name"
+    FlagNodeName     = "node-name"
+    FlagConfig       = "config"
 )
 
-type ContextKey string
+type storageSignal struct {
+    driver  string
+    storage inventory.ClusterStorage
+}
 
-const (
-    CtxKeyKubeConfig       = ContextKey(providerflags.FlagKubeConfig)
-    CtxKeyKubeClientSet    = ContextKey("kube-clientset")
-    CtxKeyRookClientSet    = ContextKey("rook-clientset")
-    CtxKeyAkashClientSet   = ContextKey("akash-clientset")
-    CtxKeyPubSub           = ContextKey("pubsub")
-    CtxKeyLifecycle        = ContextKey("lifecycle")
-    CtxKeyErrGroup         = ContextKey("errgroup")
-    CtxKeyStorage          = ContextKey("storage")
-    CtxKeyInformersFactory = ContextKey("informers-factory")
-)
+type respNodes struct {
+    res inventory.Nodes
+    err error
+}
 
-type resp struct {
-    res []akashv2beta1.InventoryClusterStorage
+type respCluster struct {
+    res inventory.Cluster
     err error
 }
 
-type req struct {
-    respCh chan resp
+type reqCluster struct {
+    respCh chan respCluster
+}
+
+type reqNodes struct {
+    respCh chan respNodes
 }
 
-type querier struct {
-    reqch chan req
+type querierNodes struct {
+    reqch chan reqNodes
 }
 
-func newQuerier() querier {
-    return querier{
-        reqch: make(chan req, 100),
+type querierCluster struct {
+    reqch chan reqCluster
+}
+
+func newQuerierCluster() querierCluster {
+    return querierCluster{
+        reqch: make(chan reqCluster, 100),
+    }
+}
+
+func newQuerierNodes() querierNodes {
+    return querierNodes{
+        reqch: make(chan reqNodes, 100),
     }
 }
 
-func (c *querier) Query(ctx context.Context) ([]akashv2beta1.InventoryClusterStorage, error) {
-    r := req{
-        respCh: make(chan resp, 1),
+func (c *querierCluster) Query(ctx context.Context) (inventory.Cluster, error) {
+    r := reqCluster{
+        respCh: make(chan respCluster, 1),
+    }
+
+    select {
+    case c.reqch <- r:
+    case <-ctx.Done():
+        return inventory.Cluster{}, ctx.Err()
+    }
+
+    select {
+    case rsp := <-r.respCh:
+        return rsp.res, rsp.err
+    case <-ctx.Done():
+        return inventory.Cluster{}, ctx.Err()
+    }
+}
+
+func (c *querierNodes) Query(ctx context.Context) (inventory.Nodes, error) {
+    r := reqNodes{
+        respCh: make(chan respNodes, 1),
     }
 
     select {
@@ -75,8 +109,14 @@ func (c *querier) Query(ctx context.Context) ([]akashv2beta1.InventoryClusterSto
     }
 }
 
-type Storage interface {
-    Query(ctx context.Context) ([]akashv2beta1.InventoryClusterStorage, error)
+type QuerierStorage interface{}
+
+type QuerierCluster interface {
+    Query(ctx context.Context) (inventory.Cluster, error)
+}
+
+type QuerierNodes interface {
+    Query(ctx context.Context) (inventory.Nodes, error)
 }
 
 type Watcher interface {
@@ -97,35 +137,36 @@ func NewRemotePodCommandExecutor(restcfg *rest.Config, clientset *kubernetes.Cli
     }
 }
 
-func InformKubeObjects(ctx context.Context, pubsub *pubsub.PubSub, informer cache.SharedIndexInformer, topic string) {
-    ErrGroupFromCtx(ctx).Go(func() error {
+func InformKubeObjects(ctx context.Context, pub pubsub.Publisher, informer cache.SharedIndexInformer, topic string) {
+    fromctx.ErrGroupFromCtx(ctx).Go(func() error {
         _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
-                pubsub.Pub(watch.Event{
+                pub.Pub(watch.Event{
                     Type:   watch.Added,
                     Object: obj.(runtime.Object),
-                }, topic)
+                }, []string{topic})
             },
             UpdateFunc: func(oldObj, newObj interface{}) {
-                pubsub.Pub(watch.Event{
+                pub.Pub(watch.Event{
                     Type:   watch.Modified,
                     Object: newObj.(runtime.Object),
-                }, topic)
+                }, []string{topic})
             },
             DeleteFunc: func(obj interface{}) {
-                pubsub.Pub(watch.Event{
+                pub.Pub(watch.Event{
                     Type:   watch.Deleted,
                     Object: obj.(runtime.Object),
-                }, topic)
+                }, []string{topic})
             },
         })
 
         if err != nil {
-            LogFromCtx(ctx).Error(err, "couldn't register event handlers")
+            fromctx.LogrFromCtx(ctx).Error(err, "couldn't register event handlers")
             return nil
         }
 
         informer.Run(ctx.Done())
+
         return nil
     })
 }
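The `querierCluster`/`querierNodes` pair implements a request/reply protocol over channels: the caller sends a request carrying its own buffered reply channel and honors the context in both directions, so neither side can deadlock on a cancelled peer. A runnable, self-contained sketch of the same shape (illustrative names, string payload instead of `inventory.Cluster`):

```go
// Minimal sketch of the channel-based request/reply pattern behind
// querierCluster.Query: the owning goroutine answers against its private
// state (no mutex needed), and the caller respects ctx on send and receive.
package main

import (
	"context"
	"fmt"
	"time"
)

type resp struct {
	res string
	err error
}

type req struct {
	respCh chan resp
}

func main() {
	reqch := make(chan req, 100)

	// Server side: single goroutine owns the state, mirroring clusterState.run.
	go func() {
		for r := range reqch {
			r.respCh <- resp{res: "cluster snapshot"}
		}
	}()

	// Client side: mirrors querierCluster.Query.
	query := func(ctx context.Context) (string, error) {
		r := req{respCh: make(chan resp, 1)}

		select {
		case reqch <- r:
		case <-ctx.Done():
			return "", ctx.Err()
		}

		select {
		case rsp := <-r.respCh:
			return rsp.res, rsp.err
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	res, err := query(ctx)
	fmt.Println(res, err)
}
```

The 1-buffered reply channel is the key detail: the server's send never blocks even if the caller has already given up on the context.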
-
-    return val.(*rest.Config)
-}
-
-func KubeClientFromCtx(ctx context.Context) *kubernetes.Clientset {
-    val := ctx.Value(CtxKeyKubeClientSet)
-    if val == nil {
-        panic("context does not have kube client set")
-    }
-
-    return val.(*kubernetes.Clientset)
-}
+const (
+    CtxKeyRookClientSet    = fromctx.Key("rook-clientset")
+    CtxKeyStorage          = fromctx.Key("storage")
+    CtxKeyFeatureDiscovery = fromctx.Key("feature-discovery")
+    CtxKeyInformersFactory = fromctx.Key("informers-factory")
+    CtxKeyHwInfo           = fromctx.Key("hardware-info")
+    CtxKeyClusterState     = fromctx.Key("cluster-state")
+    CtxKeyConfig           = fromctx.Key("config")
+)
 
 func InformersFactoryFromCtx(ctx context.Context) informers.SharedInformerFactory {
     val := ctx.Value(CtxKeyInformersFactory)
@@ -57,47 +37,47 @@ func RookClientFromCtx(ctx context.Context) *rookclientset.Clientset {
     return val.(*rookclientset.Clientset)
 }
 
-func AkashClientFromCtx(ctx context.Context) *akashclientset.Clientset {
-    val := ctx.Value(CtxKeyAkashClientSet)
+func StorageFromCtx(ctx context.Context) []QuerierStorage {
+    val := ctx.Value(CtxKeyStorage)
     if val == nil {
-        panic("context does not have akash client set")
+        panic("context does not have storage set")
     }
 
-    return val.(*akashclientset.Clientset)
+    return val.([]QuerierStorage)
 }
 
-func PubSubFromCtx(ctx context.Context) *pubsub.PubSub {
-    val := ctx.Value(CtxKeyPubSub)
+func FeatureDiscoveryFromCtx(ctx context.Context) QuerierNodes {
+    val := ctx.Value(CtxKeyFeatureDiscovery)
     if val == nil {
-        panic("context does not have pubsub set")
+        panic("context does not have feature discovery set")
     }
 
-    return val.(*pubsub.PubSub)
+    return val.(QuerierNodes)
 }
 
-func LifecycleFromCtx(ctx context.Context) lifecycle.Lifecycle {
-    val := ctx.Value(CtxKeyLifecycle)
+func HWInfoFromCtx(ctx context.Context) hwInfo {
+    val := ctx.Value(CtxKeyHwInfo)
     if val == nil {
-        panic("context does not have lifecycle set")
+        panic("context does not have hardware info set")
     }
 
-    return val.(lifecycle.Lifecycle)
+    return val.(hwInfo)
 }
 
-func ErrGroupFromCtx(ctx context.Context) *errgroup.Group {
-    val := ctx.Value(CtxKeyErrGroup)
+func ClusterStateFromCtx(ctx context.Context) QuerierCluster {
+    val := ctx.Value(CtxKeyClusterState)
     if val == nil {
-        panic("context does not have errgroup set")
+        panic("context does not have cluster state set")
     }
 
-    return val.(*errgroup.Group)
+    return val.(QuerierCluster)
 }
 
-func StorageFromCtx(ctx context.Context) []Storage {
-    val := ctx.Value(CtxKeyStorage)
+func ConfigFromCtx(ctx context.Context) Config {
+    val := ctx.Value(CtxKeyConfig)
     if val == nil {
-        panic("context does not have storage set")
+        panic("context does not have config set")
     }
 
-    return val.([]Storage)
+    return val.(Config)
 }
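The `*FromCtx` accessors follow one convention: dependencies are installed once at startup under typed keys and retrieved later, panicking loudly on a missing value rather than failing quietly downstream. A hypothetical wiring sketch of that convention (names mirror the pattern but are not the repo's):

```go
// Hypothetical sketch of the FromCtx convention: a dedicated key type
// avoids collisions with context keys from other packages, and a missing
// dependency is a programmer error, hence the panic.
package main

import (
	"context"
	"fmt"
)

// Key mirrors fromctx.Key.
type Key string

const CtxKeyConfig = Key("config")

type Config struct{ StorageClasses []string }

func ConfigFromCtx(ctx context.Context) Config {
	val := ctx.Value(CtxKeyConfig)
	if val == nil {
		panic("context does not have config set")
	}
	return val.(Config)
}

func main() {
	ctx := context.WithValue(context.Background(), CtxKeyConfig, Config{
		StorageClasses: []string{"beta2", "beta3"},
	})

	fmt.Println(ConfigFromCtx(ctx).StorageClasses)
}
```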
diff --git a/operator/psutil.go b/operator/psutil.go
new file mode 100644
index 00000000..98f10350
--- /dev/null
+++ b/operator/psutil.go
@@ -0,0 +1,118 @@
+package operator
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net"
+    "net/http"
+    "time"
+
+    "github.com/gorilla/mux"
+    "github.com/jaypipes/ghw/pkg/cpu"
+    "github.com/jaypipes/ghw/pkg/gpu"
+    "github.com/jaypipes/ghw/pkg/memory"
+    "github.com/spf13/cobra"
+    "github.com/spf13/viper"
+)
+
+const (
+    flagAPIPort = "api-port"
+)
+
+func cmdPsutil() *cobra.Command {
+    cmd := &cobra.Command{
+        Use:          "psutil",
+        Short:        "dump node hardware spec",
+        Args:         cobra.ExactArgs(0),
+        SilenceUsage: true,
+        RunE: func(cmd *cobra.Command, args []string) error {
+            return nil
+        },
+    }
+
+    cmd.AddCommand(cmdPsutilServe())
+
+    return cmd
+}
+
+func cmdPsutilServe() *cobra.Command {
+    cmd := &cobra.Command{
+        Use:          "serve",
+        Short:        "dump node hardware spec via REST",
+        Args:         cobra.ExactArgs(0),
+        SilenceUsage: true,
+        RunE: func(cmd *cobra.Command, args []string) error {
+            router := mux.NewRouter()
+            router.HandleFunc("/cpu", cpuInfoHandler).Methods(http.MethodGet)
+            router.HandleFunc("/gpu", gpuHandler).Methods(http.MethodGet)
+            router.HandleFunc("/memory", memoryHandler).Methods(http.MethodGet)
+
+            port := viper.GetUint16(flagAPIPort)
+
+            srv := &http.Server{
+                Addr:    fmt.Sprintf(":%d", port),
+                Handler: router,
+                BaseContext: func(_ net.Listener) context.Context {
+                    return cmd.Context()
+                },
+                ReadHeaderTimeout: 5 * time.Second,
+            }
+
+            return srv.ListenAndServe()
+        },
+    }
+
+    cmd.Flags().Uint16(flagAPIPort, 8081, "api port")
+    if err := viper.BindPFlag(flagAPIPort, cmd.Flags().Lookup(flagAPIPort)); err != nil {
+        panic(err)
+    }
+
+    return cmd
+}
+
+func cpuInfoHandler(w http.ResponseWriter, r *http.Request) {
+    res, err := cpu.New()
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    writeJSON(w, res)
+}
+
+func gpuHandler(w http.ResponseWriter, r *http.Request) {
+    res, err := gpu.New()
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    writeJSON(w, res)
+}
+
+func memoryHandler(w http.ResponseWriter, r *http.Request) {
+    res, err := memory.New()
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    writeJSON(w, res)
+}
+
+func writeJSON(w http.ResponseWriter, obj interface{}) {
+    bytes, err := json.Marshal(obj)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+
+    _, err = w.Write(bytes)
+    if err != nil {
+        return
+    }
+}
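The `psutil serve` subcommand exposes the ghw hardware probes as three JSON endpoints. A hypothetical smoke test against a locally running instance, assuming the default `--api-port` of `8081` (the port and host here are assumptions, not part of the diff):

```go
// Hypothetical smoke test for the psutil REST endpoints: hits each route
// and reports status code and payload size. Assumes `provider-services
// operator psutil serve` is running locally on the default port 8081.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	for _, path := range []string{"/cpu", "/gpu", "/memory"} {
		resp, err := http.Get("http://localhost:8081" + path)
		if err != nil {
			fmt.Println(path, "error:", err)
			continue
		}

		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()

		fmt.Printf("%s -> %d (%d bytes of JSON)\n", path, resp.StatusCode, len(body))
	}
}
```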
-d "${akash_api}/vendor" ]; then + echo "for script to work akash-api must be checkout locally and replace in go.work" + exit 1 +fi + +short_opts=h +long_opts=help/host:/mode: # those who take an arg END with : + +host=localhost:8081 +mode= + +while getopts ":$short_opts-:" o; do + case $o in + :) + echo >&2 "option -$OPTARG needs an argument" + continue + ;; + '?') + echo >&2 "bad option -$OPTARG" + continue + ;; + -) + o=${OPTARG%%=*} + OPTARG=${OPTARG#"$o"} + lo=/$long_opts/ + case $lo in + *"/$o"[!/:]*"/$o"[!/:]*) + echo >&2 "ambiguous option --$o" + continue + ;; + *"/$o"[:/]*) + ;; + *) + o=$o${lo#*"/$o"}; + o=${o%%[/:]*} + ;; + esac + + case $lo in + *"/$o/"*) + OPTARG= + ;; + *"/$o:/"*) + case $OPTARG in + '='*) + OPTARG=${OPTARG#=} + ;; + *) + eval "OPTARG=\$$OPTIND" + if [ "$OPTIND" -le "$#" ] && [ "$OPTARG" != -- ]; then + OPTIND=$((OPTIND + 1)) + else + echo >&2 "option --$o needs an argument" + continue + fi + ;; + esac + ;; + *) echo >&2 "unknown option --$o"; continue;; + esac + esac + case "$o" in + host) + host=$OPTARG + ;; + mode) + case "$OPTARG" in + plaintext|insecure) + ;; + *) + echo >&2 "option --$o can be plaintext|insecure" + ;; + esac + + mode=-$OPTARG + ;; + esac +done +shift "$((OPTIND - 1))" + +grpcurl \ + "$mode" \ + -use-reflection \ + -proto="${akash_api}/proto/provider/akash/inventory/v1/service.proto" \ + -proto="${akash_api}/proto/provider/akash/provider/v1/service.proto" \ + -import-path="${akash_api}/proto/provider" \ + -import-path="${akash_api}/proto/node" \ + -import-path="${akash_api}/vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ + -import-path="${akash_api}/vendor" \ + "$host" \ + "$@" diff --git a/script/setup-kube.sh b/script/setup-kube.sh index 5fb001cd..76de893b 100755 --- a/script/setup-kube.sh +++ b/script/setup-kube.sh @@ -97,7 +97,6 @@ while getopts ":$short_opts-:" o; do CRD_FILE=$OPTARG ;; esac - echo "OPT $o=$OPTARG" done shift "$((OPTIND - 1))" @@ -120,7 +119,6 @@ install_crd() { set -x kubectl apply -f "$CRD_FILE" kubectl apply -f "$rootdir/_docs/kustomize/storage/storageclass.yaml" - kubectl patch node "${KIND_NAME}-control-plane" -p '{"metadata":{"labels":{"akash.network/storageclasses":"beta2.default"}}}' } install_metrics() { diff --git a/script/tools.sh b/script/tools.sh index 2d92baa3..5807313c 100755 --- a/script/tools.sh +++ b/script/tools.sh @@ -8,13 +8,24 @@ gomod="$SCRIPT_DIR/../go.mod" function get_gotoolchain() { local gotoolchain local goversion + local local_goversion gotoolchain=$(grep -E '^toolchain go[0-9]{1,}.[0-9]{1,}.[0-9]{1,}$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n') + goversion=$(grep -E '^go [0-9]{1,}.[0-9]{1,}(.[0-9]{1,})?$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n') if [[ ${gotoolchain} == "" ]]; then # determine go toolchain from go version in go.mod if which go > /dev/null 2>&1 ; then - goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n') + local_goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n') + if [[ $($SEMVER compare "v$local_goversion" v"$goversion") -ge 0 ]]; then + goversion=$local_goversion + else + local_goversion= + fi + fi + + if [[ "$local_goversion" == "" ]]; then + goversion=$(curl -s "https://go.dev/dl/?mode=json&include=all" | jq -r --arg regexp "^go$goversion" '.[] | select(.stable == true) | select(.version | match($regexp)) | .version' | head -n 1 | sed -e s/^go//) fi if [[ $goversion != "" ]] && [[ $($SEMVER compare "v$goversion" v1.21.0) -ge 0 ]]; then diff --git a/service.go b/service.go index 
diff --git a/script/setup-kube.sh b/script/setup-kube.sh
index 5fb001cd..76de893b 100755
--- a/script/setup-kube.sh
+++ b/script/setup-kube.sh
@@ -97,7 +97,6 @@ while getopts ":$short_opts-:" o; do
             CRD_FILE=$OPTARG
             ;;
     esac
-    echo "OPT $o=$OPTARG"
 done
 shift "$((OPTIND - 1))"
 
@@ -120,7 +119,6 @@ install_crd() {
     set -x
     kubectl apply -f "$CRD_FILE"
     kubectl apply -f "$rootdir/_docs/kustomize/storage/storageclass.yaml"
-    kubectl patch node "${KIND_NAME}-control-plane" -p '{"metadata":{"labels":{"akash.network/storageclasses":"beta2.default"}}}'
 }
 
 install_metrics() {
diff --git a/script/tools.sh b/script/tools.sh
index 2d92baa3..5807313c 100755
--- a/script/tools.sh
+++ b/script/tools.sh
@@ -8,13 +8,24 @@ gomod="$SCRIPT_DIR/../go.mod"
 function get_gotoolchain() {
     local gotoolchain
     local goversion
+    local local_goversion
 
     gotoolchain=$(grep -E '^toolchain go[0-9]{1,}.[0-9]{1,}.[0-9]{1,}$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n')
+    goversion=$(grep -E '^go [0-9]{1,}.[0-9]{1,}(.[0-9]{1,})?$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n')
 
     if [[ ${gotoolchain} == "" ]]; then
         # determine go toolchain from go version in go.mod
         if which go > /dev/null 2>&1 ; then
-            goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n')
+            local_goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n')
+            if [[ $($SEMVER compare "v$local_goversion" v"$goversion") -ge 0 ]]; then
+                goversion=$local_goversion
+            else
+                local_goversion=
+            fi
+        fi
+
+        if [[ "$local_goversion" == "" ]]; then
+            goversion=$(curl -s "https://go.dev/dl/?mode=json&include=all" | jq -r --arg regexp "^go$goversion" '.[] | select(.stable == true) | select(.version | match($regexp)) | .version' | head -n 1 | sed -e s/^go//)
         fi
 
         if [[ $goversion != "" ]] && [[ $($SEMVER compare "v$goversion" v1.21.0) -ge 0 ]]; then
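The `tools.sh` change implements a simple selection rule: use the locally installed Go when it is at least the version declared in `go.mod`, otherwise look up the newest matching stable release from `https://go.dev/dl/?mode=json&include=all`. A sketch of that rule in Go, using `golang.org/x/mod/semver` purely for illustration (the script relies on its own `$SEMVER` helper):

```go
// Sketch of the toolchain-selection rule in tools.sh: prefer the local
// Go toolchain when it satisfies go.mod, otherwise signal that a matching
// release must be fetched from go.dev.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// pickToolchain returns the version to use and whether the local
// toolchain is sufficient.
func pickToolchain(localVer, modVer string) (string, bool) {
	if localVer != "" && semver.Compare("v"+localVer, "v"+modVer) >= 0 {
		return localVer, true // local toolchain is new enough
	}
	return modVer, false // caller must download a matching release
}

func main() {
	ver, local := pickToolchain("1.21.5", "1.21.0")
	fmt.Println(ver, "local:", local) // 1.21.5 local: true

	ver, local = pickToolchain("1.20.7", "1.21.0")
	fmt.Println(ver, "local:", local) // 1.21.0 local: false
}
```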
"github.com/akash-network/provider/cmd/provider-services/cmd/flags" + akashclientset "github.com/akash-network/provider/pkg/client/clientset/versioned" +) + +type Key string + +const ( + CtxKeyKubeConfig = Key(providerflags.FlagKubeConfig) + CtxKeyKubeClientSet = Key("kube-clientset") + CtxKeyAkashClientSet = Key("akash-clientset") + CtxKeyPubSub = Key("pubsub") + CtxKeyLifecycle = Key("lifecycle") + CtxKeyErrGroup = Key("errgroup") + CtxKeyLogc = Key("logc") + CtxKeyStartupCh = Key("startup-ch") +) + +type options struct { + logName Key +} + +type LogcOption func(*options) error + +type dummyLogger struct{} + +func (l *dummyLogger) Debug(_ string, _ ...interface{}) {} +func (l *dummyLogger) Info(_ string, _ ...interface{}) {} +func (l *dummyLogger) Error(_ string, _ ...interface{}) {} +func (l *dummyLogger) With(_ ...interface{}) cmblog.Logger { + return &dummyLogger{} +} + +func CmdSetContextValue(cmd *cobra.Command, key, val interface{}) { + cmd.SetContext(context.WithValue(cmd.Context(), key, val)) +} + +// WithLogc add logger object to the context +// key defaults to the "log" +// use WithLogName("") to set custom key +func WithLogc(ctx context.Context, lg cmblog.Logger, opts ...LogcOption) context.Context { + opt, _ := applyOptions(opts...) + + ctx = context.WithValue(ctx, opt.logName, lg) + + return ctx +} + +func LogcFromCtx(ctx context.Context, opts ...LogcOption) cmblog.Logger { + opt, _ := applyOptions(opts...) + + var logger cmblog.Logger + if lg, valid := ctx.Value(opt.logName).(cmblog.Logger); valid { + logger = lg + } else { + logger = &dummyLogger{} + } + + return logger +} + +func LogrFromCtx(ctx context.Context) logr.Logger { + lg, _ := logr.FromContext(ctx) + return lg +} + +func StartupChFromCtx(ctx context.Context) chan<- struct{} { + val := ctx.Value(CtxKeyStartupCh) + if val == nil { + panic("context does not have startup channel set") + } + + return val.(chan<- struct{}) +} + +func KubeConfigFromCtx(ctx context.Context) *rest.Config { + val := ctx.Value(CtxKeyKubeConfig) + if val == nil { + panic("context does not have kubeconfig set") + } + + return val.(*rest.Config) +} + +func KubeClientFromCtx(ctx context.Context) *kubernetes.Clientset { + val := ctx.Value(CtxKeyKubeClientSet) + if val == nil { + panic("context does not have kube client set") + } + + return val.(*kubernetes.Clientset) +} +func AkashClientFromCtx(ctx context.Context) *akashclientset.Clientset { + val := ctx.Value(CtxKeyAkashClientSet) + if val == nil { + panic("context does not have akash client set") + } + + return val.(*akashclientset.Clientset) +} + +func LifecycleFromCtx(ctx context.Context) lifecycle.Lifecycle { + val := ctx.Value(CtxKeyLifecycle) + if val == nil { + panic("context does not have lifecycle set") + } + + return val.(lifecycle.Lifecycle) +} + +func ErrGroupFromCtx(ctx context.Context) *errgroup.Group { + val := ctx.Value(CtxKeyErrGroup) + if val == nil { + panic("context does not have errgroup set") + } + + return val.(*errgroup.Group) +} + +func PubSubFromCtx(ctx context.Context) pubsub.PubSub { + val := ctx.Value(CtxKeyPubSub) + if val == nil { + panic("context does not have pubsub set") + } + + return val.(pubsub.PubSub) +} + +func applyOptions(opts ...LogcOption) (options, error) { + obj := &options{} + for _, opt := range opts { + if err := opt(obj); err != nil { + return options{}, err + } + } + + if obj.logName == "" { + obj.logName = CtxKeyLogc + } + + return *obj, nil +} diff --git a/types/types.go b/types/types.go new file mode 100644 index 00000000..22ab5ab3 --- 
/dev/null
+++ b/tools/fromctx/context.go
@@ -0,0 +1,156 @@
+package fromctx
+
+import (
+    "context"
+
+    "github.com/boz/go-lifecycle"
+    "github.com/go-logr/logr"
+    "github.com/spf13/cobra"
+    "github.com/troian/pubsub"
+    "golang.org/x/sync/errgroup"
+    "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/rest"
+
+    cmblog "github.com/tendermint/tendermint/libs/log"
+
+    providerflags "github.com/akash-network/provider/cmd/provider-services/cmd/flags"
+    akashclientset "github.com/akash-network/provider/pkg/client/clientset/versioned"
+)
+
+type Key string
+
+const (
+    CtxKeyKubeConfig     = Key(providerflags.FlagKubeConfig)
+    CtxKeyKubeClientSet  = Key("kube-clientset")
+    CtxKeyAkashClientSet = Key("akash-clientset")
+    CtxKeyPubSub         = Key("pubsub")
+    CtxKeyLifecycle      = Key("lifecycle")
+    CtxKeyErrGroup       = Key("errgroup")
+    CtxKeyLogc           = Key("logc")
+    CtxKeyStartupCh      = Key("startup-ch")
+)
+
+type options struct {
+    logName Key
+}
+
+type LogcOption func(*options) error
+
+type dummyLogger struct{}
+
+func (l *dummyLogger) Debug(_ string, _ ...interface{}) {}
+func (l *dummyLogger) Info(_ string, _ ...interface{})  {}
+func (l *dummyLogger) Error(_ string, _ ...interface{}) {}
+func (l *dummyLogger) With(_ ...interface{}) cmblog.Logger {
+    return &dummyLogger{}
+}
+
+func CmdSetContextValue(cmd *cobra.Command, key, val interface{}) {
+    cmd.SetContext(context.WithValue(cmd.Context(), key, val))
+}
+
+// WithLogc adds a logger object to the context.
+// The context key defaults to CtxKeyLogc; pass a LogcOption to set a custom key.
+func WithLogc(ctx context.Context, lg cmblog.Logger, opts ...LogcOption) context.Context {
+    opt, _ := applyOptions(opts...)
+
+    ctx = context.WithValue(ctx, opt.logName, lg)
+
+    return ctx
+}
+
+func LogcFromCtx(ctx context.Context, opts ...LogcOption) cmblog.Logger {
+    opt, _ := applyOptions(opts...)
+
+    var logger cmblog.Logger
+    if lg, valid := ctx.Value(opt.logName).(cmblog.Logger); valid {
+        logger = lg
+    } else {
+        logger = &dummyLogger{}
+    }
+
+    return logger
+}
+
+func LogrFromCtx(ctx context.Context) logr.Logger {
+    lg, _ := logr.FromContext(ctx)
+    return lg
+}
+
+func StartupChFromCtx(ctx context.Context) chan<- struct{} {
+    val := ctx.Value(CtxKeyStartupCh)
+    if val == nil {
+        panic("context does not have startup channel set")
+    }
+
+    return val.(chan<- struct{})
+}
+
+func KubeConfigFromCtx(ctx context.Context) *rest.Config {
+    val := ctx.Value(CtxKeyKubeConfig)
+    if val == nil {
+        panic("context does not have kubeconfig set")
+    }
+
+    return val.(*rest.Config)
+}
+
+func KubeClientFromCtx(ctx context.Context) *kubernetes.Clientset {
+    val := ctx.Value(CtxKeyKubeClientSet)
+    if val == nil {
+        panic("context does not have kube client set")
+    }
+
+    return val.(*kubernetes.Clientset)
+}
+
+func AkashClientFromCtx(ctx context.Context) *akashclientset.Clientset {
+    val := ctx.Value(CtxKeyAkashClientSet)
+    if val == nil {
+        panic("context does not have akash client set")
+    }
+
+    return val.(*akashclientset.Clientset)
+}
+
+func LifecycleFromCtx(ctx context.Context) lifecycle.Lifecycle {
+    val := ctx.Value(CtxKeyLifecycle)
+    if val == nil {
+        panic("context does not have lifecycle set")
+    }
+
+    return val.(lifecycle.Lifecycle)
+}
+
+func ErrGroupFromCtx(ctx context.Context) *errgroup.Group {
+    val := ctx.Value(CtxKeyErrGroup)
+    if val == nil {
+        panic("context does not have errgroup set")
+    }
+
+    return val.(*errgroup.Group)
+}
+
+func PubSubFromCtx(ctx context.Context) pubsub.PubSub {
+    val := ctx.Value(CtxKeyPubSub)
+    if val == nil {
+        panic("context does not have pubsub set")
+    }
+
+    return val.(pubsub.PubSub)
+}
+
+func applyOptions(opts ...LogcOption) (options, error) {
+    obj := &options{}
+    for _, opt := range opts {
+        if err := opt(obj); err != nil {
+            return options{}, err
+        }
+    }
+
+    if obj.logName == "" {
+        obj.logName = CtxKeyLogc
+    }
+
+    return *obj, nil
+}
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 00000000..22ab5ab3
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,10 @@
+package types
+
+const (
+    PubSubTopicLeasesStatus    = "leases-status"
+    PubSubTopicProviderStatus  = "provider-status"
+    PubSubTopicClusterStatus   = "cluster-status"
+    PubSubTopicBidengineStatus = "bidengine-status"
+    PubSubTopicManifestStatus  = "manifest-status"
+    PubSubTopicInventoryStatus = "inventory-status"
+)
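Unlike the panicking accessors, `LogcFromCtx` degrades gracefully: when no logger has been installed it hands back the no-op `dummyLogger`, so call sites never need a nil check. A self-contained sketch of that fallback pattern (illustrative types, not the repo's):

```go
// Sketch of the LogcFromCtx fallback pattern: a missing logger yields a
// no-op implementation instead of a nil interface.
package main

import (
	"context"
	"fmt"
)

type Logger interface {
	Info(msg string)
}

type nopLogger struct{}

func (nopLogger) Info(string) {} // mirrors fromctx's dummyLogger

type printLogger struct{}

func (printLogger) Info(msg string) { fmt.Println("INFO:", msg) }

type key string

const logKey = key("logc")

func loggerFromCtx(ctx context.Context) Logger {
	if lg, ok := ctx.Value(logKey).(Logger); ok {
		return lg
	}
	return nopLogger{} // safe default: logging silently becomes a no-op
}

func main() {
	// Without a logger installed: safe no-op.
	loggerFromCtx(context.Background()).Info("dropped")

	// With a logger installed under the expected key.
	ctx := context.WithValue(context.Background(), logKey, Logger(printLogger{}))
	loggerFromCtx(ctx).Info("visible")
}
```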