diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go
index 2cf90a31cd..32180e5772 100644
--- a/cmd/machine-config-controller/start.go
+++ b/cmd/machine-config-controller/start.go
@@ -7,7 +7,7 @@ import (
"os"
"time"
- configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
"github.com/openshift/machine-config-operator/cmd/common"
"github.com/openshift/machine-config-operator/internal/clients"
@@ -98,14 +98,14 @@ func runStartCmd(_ *cobra.Command, _ []string) {
select {
case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved():
- features, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates()
+ fg, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates()
if err != nil {
klog.Fatalf("unable to get initial features: %v", err)
}
- enabled, disabled := getEnabledDisabledFeatures(features)
+ enabled, disabled := getEnabledDisabledFeatures(fg)
klog.Infof("FeatureGates initialized: enabled=%v disabled=%v", enabled, disabled)
- if features.Enabled(configv1.FeatureGatePinnedImages) && features.Enabled(configv1.FeatureGateMachineConfigNodes) {
+ if fg.Enabled(features.FeatureGatePinnedImages) && fg.Enabled(features.FeatureGateMachineConfigNodes) {
pinnedImageSet := pinnedimageset.New(
ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(),
ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(),
diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go
index 5cff83bc88..fb3585cc0e 100644
--- a/cmd/machine-config-daemon/start.go
+++ b/cmd/machine-config-daemon/start.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/tools/clientcmd"
- configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
"github.com/openshift/machine-config-operator/internal/clients"
ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
"github.com/openshift/machine-config-operator/pkg/daemon"
@@ -212,8 +212,8 @@ func runStartCmd(_ *cobra.Command, _ []string) {
klog.Fatalf("Could not get FG: %v", err)
} else {
klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures())
- if featureGates.Enabled(configv1.FeatureGatePinnedImages) && featureGates.Enabled(configv1.FeatureGateMachineConfigNodes) {
- klog.Infof("Feature enabled: %s", configv1.FeatureGatePinnedImages)
+ if featureGates.Enabled(features.FeatureGatePinnedImages) && featureGates.Enabled(features.FeatureGateMachineConfigNodes) {
+ klog.Infof("Feature enabled: %s", features.FeatureGatePinnedImages)
criClient, err := cri.NewClient(ctx, constants.DefaultCRIOSocketPath)
if err != nil {
klog.Fatalf("Failed to initialize CRI client: %v", err)
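
Both Go hunks above move the feature-gate constants from github.com/openshift/api/config/v1 to the new github.com/openshift/api/features package; the library-go accessor pattern itself is unchanged. Below is a minimal sketch of that pattern under the new import, assuming a started featuregates.FeatureGateAccess is available (the helper name and wiring are hypothetical; the calls mirror the hunks above):

package main

import (
	features "github.com/openshift/api/features"
	"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
	"k8s.io/klog/v2"
)

// pinnedImagesEnabled blocks until the initial feature gates have been
// observed, then reports whether both gates required by the pinned-image
// controllers are enabled.
func pinnedImagesEnabled(access featuregates.FeatureGateAccess) bool {
	<-access.InitialFeatureGatesObserved()
	fg, err := access.CurrentFeatureGates()
	if err != nil {
		klog.Fatalf("unable to get current feature gates: %v", err)
	}
	return fg.Enabled(features.FeatureGatePinnedImages) &&
		fg.Enabled(features.FeatureGateMachineConfigNodes)
}

func main() {
	// Construction of the FeatureGateAccess (clients, informers, version
	// detection) is omitted here; see runStartCmd above for the real wiring.
}
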
diff --git a/go.mod b/go.mod
index 3f2c77a5c2..79623c3fc5 100644
--- a/go.mod
+++ b/go.mod
@@ -27,10 +27,9 @@ require (
github.com/google/renameio v0.1.0
github.com/imdario/mergo v0.3.13
github.com/opencontainers/go-digest v1.0.0
- github.com/openshift/api v0.0.0-20240422085825-2624175e9673
+ github.com/openshift/api v0.0.0-20240425081546-8203151f085f
github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd
- github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe
- github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528
+ github.com/openshift/library-go v0.0.0-20240424194921-cb8aac942b79
github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b
github.com/prometheus/client_golang v1.17.0
github.com/spf13/cobra v1.8.0
diff --git a/go.sum b/go.sum
index 4747ab0ec1..c810e69b93 100644
--- a/go.sum
+++ b/go.sum
@@ -689,16 +689,14 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/openshift/api v0.0.0-20240422085825-2624175e9673 h1:D4qblu6z2A92fh7u9Nt1YskDtu+GySKiYP/D3tMWQ6A=
-github.com/openshift/api v0.0.0-20240422085825-2624175e9673/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
+github.com/openshift/api v0.0.0-20240425081546-8203151f085f h1:MH6RaWREjGgf1NUcBj0iJIGXd0e6jV8jRhV4Mx/HAf4=
+github.com/openshift/api v0.0.0-20240425081546-8203151f085f/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd h1:z5TPsTaB8Zzvv9fK/kVB6X+FG1GtwM56WfoanhlbyyQ=
github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd/go.mod h1:OC07uJXbaW/s21N6XDucROlmfUOhMXD5OrY3ZN3DmiM=
-github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe h1:wDQtyIbJJIoif2Ux0S+9MJWIWEGV0oG+iLm8WtqwdSw=
-github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe/go.mod h1:SGUtv1pKZSzSVr2YCxXFvhE+LbGfI+vcetEhNicKayw=
github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA=
github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
-github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528 h1:vnLKZUSW1aPv7Pd6+QYjDUU+/8z2MSBacU38cAlNMPA=
-github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528/go.mod h1:m/HsttSi90vSixwoy5mPUBHcZid2YRw/QbsLErLxF9s=
+github.com/openshift/library-go v0.0.0-20240424194921-cb8aac942b79 h1:4iJrZMloPJlHfwo7NE8lWEXV/Ybg3RpV4LRv/ufd10g=
+github.com/openshift/library-go v0.0.0-20240424194921-cb8aac942b79/go.mod h1:lFwyRj0XjUf25Da3Q00y+KuaxCWTJ6YzYPDX1+96nco=
github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b h1:oXzC1N6E9gw76/WH2gEA8GEHvuq09wuVQ9GoCuR8GF4=
github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b/go.mod h1:l9/qeKZuAmYUMl0yicJlbkPGDsIycGhwxOvOAWyaP0E=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
diff --git a/install/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml b/install/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml
new file mode 100644
index 0000000000..79c49e0580
--- /dev/null
+++ b/install/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml
@@ -0,0 +1,398 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ api-approved.openshift.io: https://github.com/openshift/api/pull/1457
+ api.openshift.io/merged-by-featuregates: "true"
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ release.openshift.io/feature-set: DevPreviewNoUpgrade
+ name: clusterimagepolicies.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: ClusterImagePolicy
+ listKind: ClusterImagePolicyList
+ plural: clusterimagepolicies
+ singular: clusterimagepolicy
+ scope: Cluster
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: "ClusterImagePolicy holds cluster-wide configuration for image
+ signature verification \n Compatibility level 4: No compatibility is provided,
+ the API can change at any point for any reason. These capabilities should
+ not be used by applications needing long term support."
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec contains the configuration for the cluster image policy.
+ properties:
+ policy:
+ description: policy contains configuration to allow scopes to be verified,
+ and defines how images not matching the verification policy will
+ be treated.
+ properties:
+ rootOfTrust:
+ description: rootOfTrust specifies the root of trust for the policy.
+ properties:
+ fulcioCAWithRekor:
+ description: 'fulcioCAWithRekor defines the root of trust
+ based on the Fulcio certificate and the Rekor public key.
+ For more information about Fulcio and Rekor, please refer
+ to the document at: https://github.com/sigstore/fulcio and
+ https://github.com/sigstore/rekor'
+ properties:
+ fulcioCAData:
+ description: fulcioCAData contains inline base64-encoded
+ data for the PEM format fulcio CA. fulcioCAData must
+ be at most 8192 characters.
+ format: byte
+ maxLength: 8192
+ type: string
+ fulcioSubject:
+ description: fulcioSubject specifies OIDC issuer and the
+ email of the Fulcio authentication configuration.
+ properties:
+ oidcIssuer:
+ description: 'oidcIssuer contains the expected OIDC
+ issuer. It will be verified that the Fulcio-issued
+ certificate contains a (Fulcio-defined) certificate
+ extension pointing at this OIDC issuer URL. When
+ Fulcio issues certificates, it includes a value
+ based on an URL inside the client-provided ID token.
+ Example: "https://expected.OIDC.issuer/"'
+ type: string
+ x-kubernetes-validations:
+ - message: oidcIssuer must be a valid URL
+ rule: isURL(self)
+ signedEmail:
+ description: 'signedEmail holds the email address
+                            the Fulcio certificate is issued for. Example:
+ "expected-signing-user@example.com"'
+ type: string
+ x-kubernetes-validations:
+ - message: invalid email address
+ rule: self.matches('^\\S+@\\S+$')
+ required:
+ - oidcIssuer
+ - signedEmail
+ type: object
+ rekorKeyData:
+ description: rekorKeyData contains inline base64-encoded
+ data for the PEM format from the Rekor public key. rekorKeyData
+ must be at most 8192 characters.
+ format: byte
+ maxLength: 8192
+ type: string
+ required:
+ - fulcioCAData
+ - fulcioSubject
+ - rekorKeyData
+ type: object
+ policyType:
+ description: policyType serves as the union's discriminator.
+ Users are required to assign a value to this field, choosing
+ one of the policy types that define the root of trust. "PublicKey"
+ indicates that the policy relies on a sigstore publicKey
+ and may optionally use a Rekor verification. "FulcioCAWithRekor"
+ indicates that the policy is based on the Fulcio certification
+ and incorporates a Rekor verification.
+ enum:
+ - PublicKey
+ - FulcioCAWithRekor
+ type: string
+ publicKey:
+ description: publicKey defines the root of trust based on
+ a sigstore public key.
+ properties:
+ keyData:
+ description: keyData contains inline base64-encoded data
+ for the PEM format public key. KeyData must be at most
+ 8192 characters.
+ format: byte
+ maxLength: 8192
+ type: string
+ rekorKeyData:
+ description: rekorKeyData contains inline base64-encoded
+ data for the PEM format from the Rekor public key. rekorKeyData
+ must be at most 8192 characters.
+ format: byte
+ maxLength: 8192
+ type: string
+ required:
+ - keyData
+ type: object
+ required:
+ - policyType
+ type: object
+ x-kubernetes-validations:
+ - message: publicKey is required when policyType is PublicKey,
+ and forbidden otherwise
+ rule: 'has(self.policyType) && self.policyType == ''PublicKey''
+ ? has(self.publicKey) : !has(self.publicKey)'
+ - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor,
+ and forbidden otherwise
+ rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor''
+ ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)'
+ signedIdentity:
+ description: signedIdentity specifies what image identity the
+ signature claims about the image. The required matchPolicy field
+ specifies the approach used in the verification process to verify
+ the identity in the signature and the actual image identity,
+ the default matchPolicy is "MatchRepoDigestOrExact".
+ properties:
+ exactRepository:
+ description: exactRepository is required if matchPolicy is
+ set to "ExactRepository".
+ properties:
+ repository:
+ description: repository is the reference of the image
+ identity to be matched. The value should be a repository
+ name (by omitting the tag or digest) in a registry implementing
+ the "Docker Registry HTTP API V2". For example, docker.io/library/busybox
+ maxLength: 512
+ type: string
+ x-kubernetes-validations:
+ - message: invalid repository or prefix in the signedIdentity,
+ should not include the tag or digest
+ rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')?
+ self.matches(''^(localhost:[0-9]+)$''): true'
+ - message: invalid repository or prefix in the signedIdentity
+ rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')
+ required:
+ - repository
+ type: object
+ matchPolicy:
+ description: matchPolicy sets the type of matching to be used.
+ Valid values are "MatchRepoDigestOrExact", "MatchRepository",
+ "ExactRepository", "RemapIdentity". When omitted, the default
+                      value is "MatchRepoDigestOrExact". If matchPolicy is set to
+ ExactRepository, then the exactRepository must be specified.
+                      If matchPolicy is set to RemapIdentity, then the remapIdentity
+ must be specified. "MatchRepoDigestOrExact" means that the
+ identity in the signature must be in the same repository
+ as the image identity if the image identity is referenced
+ by a digest. Otherwise, the identity in the signature must
+ be the same as the image identity. "MatchRepository" means
+ that the identity in the signature must be in the same repository
+ as the image identity. "ExactRepository" means that the
+ identity in the signature must be in the same repository
+ as a specific identity specified by "repository". "RemapIdentity"
+ means that the signature must be in the same as the remapped
+ image identity. Remapped image identity is obtained by replacing
+                        the "prefix" with the specified “signedPrefix” if the
+ image identity matches the specified remapPrefix.
+ enum:
+ - MatchRepoDigestOrExact
+ - MatchRepository
+ - ExactRepository
+ - RemapIdentity
+ type: string
+ remapIdentity:
+ description: remapIdentity is required if matchPolicy is set
+ to "RemapIdentity".
+ properties:
+ prefix:
+ description: prefix is the prefix of the image identity
+ to be matched. If the image identity matches the specified
+ prefix, that prefix is replaced by the specified “signedPrefix”
+                            (otherwise it is used unchanged and no remapping
+                            takes place). This is useful when verifying signatures
+ for a mirror of some other repository namespace that
+ preserves the vendor’s repository structure. The prefix
+ and signedPrefix values can be either host[:port] values
+ (matching exactly the same host[:port], string), repository
+ namespaces, or repositories (i.e. they must not contain
+ tags/digests), and match as prefixes of the fully expanded
+ form. For example, docker.io/library/busybox (not busybox)
+ to specify that single repository, or docker.io/library
+ (not an empty string) to specify the parent namespace
+ of docker.io/library/busybox.
+ maxLength: 512
+ type: string
+ x-kubernetes-validations:
+ - message: invalid repository or prefix in the signedIdentity,
+ should not include the tag or digest
+ rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')?
+ self.matches(''^(localhost:[0-9]+)$''): true'
+ - message: invalid repository or prefix in the signedIdentity
+ rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')
+ signedPrefix:
+ description: signedPrefix is the prefix of the image identity
+ to be matched in the signature. The format is the same
+ as "prefix". The values can be either host[:port] values
+ (matching exactly the same host[:port], string), repository
+ namespaces, or repositories (i.e. they must not contain
+ tags/digests), and match as prefixes of the fully expanded
+ form. For example, docker.io/library/busybox (not busybox)
+ to specify that single repository, or docker.io/library
+ (not an empty string) to specify the parent namespace
+ of docker.io/library/busybox.
+ maxLength: 512
+ type: string
+ x-kubernetes-validations:
+ - message: invalid repository or prefix in the signedIdentity,
+ should not include the tag or digest
+ rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')?
+ self.matches(''^(localhost:[0-9]+)$''): true'
+ - message: invalid repository or prefix in the signedIdentity
+ rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')
+ required:
+ - prefix
+ - signedPrefix
+ type: object
+ required:
+ - matchPolicy
+ type: object
+ x-kubernetes-validations:
+ - message: exactRepository is required when matchPolicy is ExactRepository,
+ and forbidden otherwise
+ rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'')
+ ? has(self.exactRepository) : !has(self.exactRepository)'
+ - message: remapIdentity is required when matchPolicy is RemapIdentity,
+ and forbidden otherwise
+ rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'')
+ ? has(self.remapIdentity) : !has(self.remapIdentity)'
+ required:
+ - rootOfTrust
+ type: object
+ scopes:
+ description: 'scopes defines the list of image identities assigned
+ to a policy. Each item refers to a scope in a registry implementing
+ the "Docker Registry HTTP API V2". Scopes matching individual images
+ are named Docker references in the fully expanded form, either using
+ a tag or digest. For example, docker.io/library/busybox:latest (not
+ busybox:latest). More general scopes are prefixes of individual-image
+ scopes, and specify a repository (by omitting the tag or digest),
+ a repository namespace, or a registry host (by only specifying the
+ host name and possibly a port number) or a wildcard expression starting
+ with `*.`, for matching all subdomains (not including a port number).
+ Wildcards are only supported for subdomain matching, and may not
+ be used in the middle of the host, i.e. *.example.com is a valid
+ case, but example*.*.com is not. Please be aware that the scopes
+ should not be nested under the repositories of OpenShift Container
+ Platform images. If configured, the policies for OpenShift Container
+ Platform repositories will not be in effect. For additional details
+ about the format, please refer to the document explaining the docker
+ transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker'
+ items:
+ maxLength: 512
+ type: string
+ x-kubernetes-validations:
+ - message: invalid image scope format, scope must contain a fully
+ qualified domain name or 'localhost'
+ rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0]
+ == ''localhost'' : true'
+ - message: invalid image scope with wildcard, a wildcard can only
+ be at the start of the domain and is only supported for subdomain
+ matching, not path matching
+ rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'')
+ : true'
+ - message: invalid repository namespace or image specification in
+ the image scope
+ rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'')
+ : true'
+ maxItems: 256
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - policy
+ - scopes
+ type: object
+ status:
+ description: status contains the observed state of the resource.
+ properties:
+ conditions:
+ description: conditions provide details on the status of this API
+ Resource.
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ \n type FooStatus struct{ // Represents the observations of a
+ foo's current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
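
The schema above encodes a discriminated union: policyType selects which root-of-trust stanza (publicKey or fulcioCAWithRekor) must be present, enforced by the CEL rules. A hypothetical ClusterImagePolicy that satisfies it, with placeholder name, scope, and key material (none of these values come from this patch):

apiVersion: config.openshift.io/v1alpha1
kind: ClusterImagePolicy
metadata:
  name: example-signature-policy
spec:
  scopes:
    - registry.example.com/my-namespace
  policy:
    rootOfTrust:
      policyType: PublicKey
      publicKey:
        keyData: <base64-encoded PEM public key>
    signedIdentity:
      matchPolicy: MatchRepoDigestOrExact
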
diff --git a/install/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
index 7acdf0d4b8..dbeb9e9293 100644
--- a/install/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
+++ b/install/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
@@ -308,11 +308,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+                          is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -448,11 +448,10 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+                          is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
enum:
- Reboot
- Drain
@@ -581,11 +580,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+                          is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -865,11 +864,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+                            is executed. Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
@@ -1004,8 +1003,8 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
+                          is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
requires a corresponding service target specified
in the reload/restart field. Other values require
no further configuration
@@ -1140,11 +1139,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+                            is executed. Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
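
The reworded descriptions above spell out the same constraint the CEL rules enforce: a Reload or Restart action must name its service target, while the other action types need no extra configuration. A hypothetical spec fragment that satisfies those rules (resource name, file path, and service name are placeholders, not values from this patch):

apiVersion: operator.openshift.io/v1
kind: MachineConfiguration
metadata:
  name: cluster
spec:
  nodeDisruptionPolicy:
    files:
      - path: /etc/example.conf
        actions:
          - type: Restart
            restart:
              serviceName: crio.service
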
diff --git a/install/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml
new file mode 100644
index 0000000000..4f47e4631f
--- /dev/null
+++ b/install/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml
@@ -0,0 +1,1292 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ api-approved.openshift.io: https://github.com/openshift/api/pull/1453
+ api.openshift.io/merged-by-featuregates: "true"
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ release.openshift.io/feature-set: DevPreviewNoUpgrade
+ name: machineconfigurations.operator.openshift.io
+spec:
+ group: operator.openshift.io
+ names:
+ kind: MachineConfiguration
+ listKind: MachineConfigurationList
+ plural: machineconfigurations
+ singular: machineconfiguration
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: "MachineConfiguration provides information to configure an operator
+ to manage Machine Configuration. \n Compatibility level 1: Stable within
+ a major release for a minimum of 12 months or 3 minor releases (whichever
+ is longer)."
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec is the specification of the desired behavior of the
+ Machine Config Operator
+ properties:
+ failedRevisionLimit:
+ description: failedRevisionLimit is the number of failed static pod
+ installer revisions to keep on disk and in the api -1 = unlimited,
+ 0 or unset = 5 (default)
+ format: int32
+ type: integer
+ forceRedeploymentReason:
+ description: forceRedeploymentReason can be used to force the redeployment
+ of the operand by providing a unique string. This provides a mechanism
+ to kick a previously failed deployment and provide a reason why
+ you think it will work this time instead of failing again on the
+ same config.
+ type: string
+ logLevel:
+ default: Normal
+ description: "logLevel is an intent based logging for an overall component.
+ \ It does not give fine grained control, but it is a simple way
+ to manage coarse grained logging choices that operators have to
+ interpret for their operands. \n Valid values are: \"Normal\", \"Debug\",
+ \"Trace\", \"TraceAll\". Defaults to \"Normal\"."
+ enum:
+ - ""
+ - Normal
+ - Debug
+ - Trace
+ - TraceAll
+ type: string
+ managedBootImages:
+ description: managedBootImages allows configuration for the management
+ of boot images for machine resources within the cluster. This configuration
+ allows users to select resources that should be updated to the latest
+ boot images during cluster upgrades, ensuring that new machines
+ always boot with the current cluster version's boot image. When
+ omitted, no boot images will be updated.
+ properties:
+ machineManagers:
+ description: machineManagers can be used to register machine management
+ resources for boot image updates. The Machine Config Operator
+ will watch for changes to this list. Only one entry is permitted
+ per type of machine management resource.
+ items:
+ description: MachineManager describes a target machine resource
+ that is registered for boot image updates. It stores identifying
+ information such as the resource type and the API Group of
+ the resource. It also provides granular control via the selection
+ field.
+ properties:
+ apiGroup:
+ description: apiGroup is name of the APIGroup that the machine
+ management resource belongs to. The only current valid
+ value is machine.openshift.io. machine.openshift.io means
+ that the machine manager will only register resources
+ that belong to OpenShift machine API group.
+ enum:
+ - machine.openshift.io
+ type: string
+ resource:
+ description: resource is the machine management resource's
+ type. The only current valid value is machinesets. machinesets
+ means that the machine manager will only register resources
+ of the kind MachineSet.
+ enum:
+ - machinesets
+ type: string
+ selection:
+ description: selection allows granular control of the machine
+ management resources that will be registered for boot
+ image updates.
+ properties:
+ mode:
+ description: mode determines how machine managers will
+ be selected for updates. Valid values are All and
+ Partial. All means that every resource matched by
+ the machine manager will be updated. Partial requires
+ specified selector(s) and allows customisation of
+ which resources matched by the machine manager will
+ be updated.
+ enum:
+ - All
+ - Partial
+ type: string
+ partial:
+ description: partial provides label selector(s) that
+ can be used to match machine management resources.
+ Only permitted when mode is set to "Partial".
+ properties:
+ machineResourceSelector:
+ description: machineResourceSelector is a label
+ selector that can be used to select machine resources
+ like MachineSets.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a key,
+ and an operator that relates the key and
+ values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - machineResourceSelector
+ type: object
+ required:
+ - mode
+ type: object
+ x-kubernetes-validations:
+ - message: Partial is required when type is partial, and
+ forbidden otherwise
+ rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial)
+ : !has(self.partial)'
+ required:
+ - apiGroup
+ - resource
+ - selection
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - resource
+ - apiGroup
+ x-kubernetes-list-type: map
+ type: object
+ managementState:
+ description: managementState indicates whether and how the operator
+ should manage the component
+ pattern: ^(Managed|Unmanaged|Force|Removed)$
+ type: string
+ nodeDisruptionPolicy:
+ description: nodeDisruptionPolicy allows an admin to set granular
+ node disruption actions for MachineConfig-based updates, such as
+ drains, service reloads, etc. Specifying this will allow for less
+ downtime when doing small configuration updates to the cluster.
+ This configuration has no effect on cluster upgrades which will
+ still incur node disruption where required.
+ properties:
+ files:
+ description: files is a list of MachineConfig file definitions
+                    and actions to take on changes to those paths. This list supports
+ a maximum of 50 entries.
+ items:
+ description: NodeDisruptionPolicySpecFile is a file entry and
+ corresponding actions to take and is used in the NodeDisruptionPolicyConfig
+ object
+ properties:
+ actions:
+ description: actions represents the series of commands to
+ be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that they
+ are set in this list. If there are other incoming changes
+ to other MachineConfig entries in the same update that
+                        require a reboot, the reboot will supersede these actions.
+ Valid actions are Reboot, Drain, Reload, DaemonReload
+ and None. The Reboot action and the None action cannot
+ be used in conjunction with any of the other actions.
+ This list supports a maximum of 10 entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be reloaded
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to restart,
+ only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be restarted
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that will
+ be carried out if this NodeDisruptionPolicySpecActionType
+                            is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload)
+ : !has(self.reload)'
+ - message: restart is required when type is Restart, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart'' ?
+ has(self.restart) : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self)
+ == 1 : true'
+ - message: None action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self) ==
+ 1 : true'
+ path:
+ description: path is the location of a file being managed
+ through a MachineConfig. The Actions in the policy will
+ apply to changes to the file at this path.
+ type: string
+ required:
+ - actions
+ - path
+ type: object
+ maxItems: 50
+ type: array
+ x-kubernetes-list-map-keys:
+ - path
+ x-kubernetes-list-type: map
+ sshkey:
+ description: sshkey maps to the ignition.sshkeys field in the
+                      MachineConfig object; defining an action for this will apply
+ to all sshkey changes in the cluster
+ properties:
+ actions:
+ description: actions represents the series of commands to
+ be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that they
+ are set in this list. If there are other incoming changes
+ to other MachineConfig entries in the same update that require
+                      a reboot, the reboot will supersede these actions. Valid
+ actions are Reboot, Drain, Reload, DaemonReload and None.
+ The Reboot action and the None action cannot be used in
+ conjunction with any of the other actions. This list supports
+ a maximum of 10 entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be reloaded Service
+ names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE}, where
+ ${SERVICETYPE} must be one of ".service", ".socket",
+ ".device", ".mount", ".automount", ".swap",
+ ".target", ".path", ".timer",".snapshot", ".slice"
+ or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to restart,
+ only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be restarted Service
+ names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE}, where
+ ${SERVICETYPE} must be one of ".service", ".socket",
+ ".device", ".mount", ".automount", ".swap",
+ ".target", ".path", ".timer",".snapshot", ".slice"
+ or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that will
+ be carried out if this NodeDisruptionPolicySpecActionType
+                        is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload, and forbidden
+ otherwise
+ rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload)
+ : !has(self.reload)'
+ - message: restart is required when type is Restart, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart'' ? has(self.restart)
+ : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self) ==
+ 1 : true'
+ - message: None action can only be specified standalone, as
+ it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self) ==
+ 1 : true'
+ required:
+ - actions
+ type: object
+ units:
+                    description: units is a list of MachineConfig unit definitions and
+                      actions to take on changes to those services. This list supports
+ a maximum of 50 entries.
+ items:
+ description: NodeDisruptionPolicySpecUnit is a systemd unit
+ name and corresponding actions to take and is used in the
+ NodeDisruptionPolicyConfig object
+ properties:
+ actions:
+ description: actions represents the series of commands to
+ be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that they
+ are set in this list. If there are other incoming changes
+ to other MachineConfig entries in the same update that
+                        require a reboot, the reboot will supersede these actions.
+ Valid actions are Reboot, Drain, Reload, DaemonReload
+ and None. The Reboot action and the None action cannot
+ be used in conjunction with any of the other actions.
+ This list supports a maximum of 10 entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be reloaded
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to restart,
+ only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be restarted
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can up to 255 characters long. ${NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and
+ "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME}
+ must be atleast 1 character long and can only
+ consist of alphabets, digits, ":", "-", "_",
+ ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that will
+ be carried out if this NodeDisruptionPolicySpecActionType
+                          is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload)
+ : !has(self.reload)'
+ - message: restart is required when type is Restart, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart'' ?
+ has(self.restart) : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self)
+ == 1 : true'
+ - message: None action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self) ==
+ 1 : true'
+ name:
+ description: name represents the service name of a systemd
+                      service managed through a MachineConfig. Actions specified
+ will be applied for changes to the named service. Service
+ names should be of the format ${NAME}${SERVICETYPE} and
+ can up to 255 characters long. ${NAME} must be atleast
+ 1 character long and can only consist of alphabets, digits,
+ ":", "-", "_", ".", and "\". ${SERVICETYPE} must be one
+ of ".service", ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot", ".slice"
+ or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE}
+ must be one of ".service", ".socket", ".device", ".mount",
+ ".automount", ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected format
+ is ${NAME}${SERVICETYPE}, where {NAME} must be atleast
+ 1 character long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - actions
+ - name
+ type: object
+ maxItems: 50
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ type: object
+ observedConfig:
+ description: observedConfig holds a sparse config that controller
+ has observed from the cluster state. It exists in spec because
+ it is an input to the level for the operator
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ operatorLogLevel:
+ default: Normal
+ description: "operatorLogLevel is an intent based logging for the
+ operator itself. It does not give fine grained control, but it
+ is a simple way to manage coarse grained logging choices that operators
+ have to interpret for themselves. \n Valid values are: \"Normal\",
+ \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"."
+ enum:
+ - ""
+ - Normal
+ - Debug
+ - Trace
+ - TraceAll
+ type: string
+ succeededRevisionLimit:
+ description: succeededRevisionLimit is the number of successful static
+ pod installer revisions to keep on disk and in the api -1 = unlimited,
+ 0 or unset = 5 (default)
+ format: int32
+ type: integer
+ unsupportedConfigOverrides:
+ description: unsupportedConfigOverrides overrides the final configuration
+ that was computed by the operator. Red Hat does not support the
+ use of this field. Misuse of this field could lead to unexpected
+ behavior or conflict with other configuration options. Seek guidance
+ from the Red Hat support before using this field. Use of this property
+ blocks cluster upgrades, it must be removed before upgrading your
+ cluster.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ status:
+ description: status is the most recently observed status of the Machine
+ Config Operator
+ properties:
+ conditions:
+ description: conditions is a list of conditions and their status
+ items:
+ description: OperatorCondition is just the standard condition fields.
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ generations:
+ description: generations are used to determine when an item needs
+ to be reconciled or has changed in a way that needs a reaction.
+ items:
+ description: GenerationStatus keeps track of the generation for
+ a given resource so that decisions about forced updates can be
+ made.
+ properties:
+ group:
+ description: group is the group of the thing you're tracking
+ type: string
+ hash:
+ description: hash is an optional field set for resources without
+ generation that are content sensitive like secrets and configmaps
+ type: string
+ lastGeneration:
+ description: lastGeneration is the last generation of the workload
+ controller involved
+ format: int64
+ type: integer
+ name:
+ description: name is the name of the thing you're tracking
+ type: string
+ namespace:
+ description: namespace is where the thing you're tracking is
+ type: string
+ resource:
+ description: resource is the resource type of the thing you're
+ tracking
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ latestAvailableRevision:
+ description: latestAvailableRevision is the deploymentID of the most
+ recent deployment
+ format: int32
+ type: integer
+ latestAvailableRevisionReason:
+ description: latestAvailableRevisionReason describe the detailed reason
+ for the most recent deployment
+ type: string
+ nodeDisruptionPolicyStatus:
+ description: nodeDisruptionPolicyStatus status reflects what the latest
+ cluster-validated policies are, and will be used by the Machine
+ Config Daemon during future node updates.
+ properties:
+ clusterPolicies:
+ description: clusterPolicies is a merge of cluster default and
+ user provided node disruption policies.
+ properties:
+ files:
+ description: files is a list of MachineConfig file definitions
+                      and actions to take on changes to those paths
+ items:
+ description: NodeDisruptionPolicyStatusFile is a file entry
+ and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus
+ object
+ properties:
+ actions:
+ description: actions represents the series of commands
+ to be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that
+ they are set in this list. If there are other incoming
+ changes to other MachineConfig entries in the same
+                          update that require a reboot, the reboot will supersede
+ these actions. Valid actions are Reboot, Drain, Reload,
+ DaemonReload and None. The Reboot action and the None
+ action cannot be used in conjunction with any of the
+ other actions. This list supports a maximum of 10
+ entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name
+ (e.g. crio.service) of the service to be
+ reloaded. Service names should be of the
+ format ${NAME}${SERVICETYPE} and can be up
+ to 255 characters long. ${NAME} must be
+ at least 1 character long and can only consist
+ of letters, digits, ":", "-", "_", ".",
+ and "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to
+ restart, only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name
+ (e.g. crio.service) of the service to be
+ restarted. Service names should be of the
+ format ${NAME}${SERVICETYPE} and can be up
+ to 255 characters long. ${NAME} must be
+ at least 1 character long and can only consist
+ of letters, digits, ":", "-", "_", ".",
+ and "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that
+ will be carried out if this NodeDisruptionPolicyStatusActionType
+ is executed. Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration.
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ - Special
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload,
+ and forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Reload''
+ ? has(self.reload) : !has(self.reload)'
+ - message: restart is required when type is Restart,
+ and forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart''
+ ? has(self.restart) : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self)
+ == 1 : true'
+ - message: None action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self)
+ == 1 : true'
+ path:
+ description: path is the location of a file being managed
+ through a MachineConfig. The Actions in the policy
+ will apply to changes to the file at this path.
+ type: string
+ required:
+ - actions
+ - path
+ type: object
+ maxItems: 100
+ type: array
+ x-kubernetes-list-map-keys:
+ - path
+ x-kubernetes-list-type: map
+ sshkey:
+ description: sshkey is the overall sshkey MachineConfig definition
+ properties:
+ actions:
+ description: actions represents the series of commands
+ to be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that
+ they are set in this list. If there are other incoming
+ changes to other MachineConfig entries in the same update
+ that require a reboot, the reboot will supersede these
+ actions. Valid actions are Reboot, Drain, Reload, DaemonReload
+ and None. The Reboot action and the None action cannot
+ be used in conjunction with any of the other actions.
+ This list supports a maximum of 10 entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be reloaded.
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can be up to 255 characters long. ${NAME}
+ must be at least 1 character long and can only
+ consist of letters, digits, ":", "-", "_",
+ ".", and "\". ${SERVICETYPE} must be one of
+ ".service", ".socket", ".device", ".mount",
+ ".automount", ".swap", ".target", ".path",
+ ".timer", ".snapshot", ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to restart,
+ only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name (e.g.
+ crio.service) of the service to be restarted.
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can be up to 255 characters long. ${NAME}
+ must be at least 1 character long and can only
+ consist of letters, digits, ":", "-", "_",
+ ".", and "\". ${SERVICETYPE} must be one of
+ ".service", ".socket", ".device", ".mount",
+ ".automount", ".swap", ".target", ".path",
+ ".timer", ".snapshot", ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that will
+ be carried out if this NodeDisruptionPolicyStatusActionType
+ is executed. Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
+ requires a corresponding service target specified
+ in the reload/restart field. Other values require
+ no further configuration.
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ - Special
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload, and
+ forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Reload'' ?
+ has(self.reload) : !has(self.reload)'
+ - message: restart is required when type is Restart,
+ and forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart''
+ ? has(self.restart) : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self)
+ == 1 : true'
+ - message: None action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self)
+ == 1 : true'
+ required:
+ - actions
+ type: object
+ units:
+ description: units is a list of MachineConfig unit definitions
+ and actions to take on changes to those services
+ items:
+ description: NodeDisruptionPolicyStatusUnit is a systemd
+ unit name and corresponding actions to take and is used
+ in the NodeDisruptionPolicyClusterStatus object
+ properties:
+ actions:
+ description: actions represents the series of commands
+ to be executed on changes to the file at the corresponding
+ file path. Actions will be applied in the order that
+ they are set in this list. If there are other incoming
+ changes to other MachineConfig entries in the same
+ update that require a reboot, the reboot will supersede
+ these actions. Valid actions are Reboot, Drain, Reload,
+ DaemonReload and None. The Reboot action and the None
+ action cannot be used in conjunction with any of the
+ other actions. This list supports a maximum of 10
+ entries.
+ items:
+ properties:
+ reload:
+ description: reload specifies the service to reload,
+ only valid if type is reload
+ properties:
+ serviceName:
+ description: serviceName is the full name
+ (e.g. crio.service) of the service to be
+ reloaded. Service names should be of the
+ format ${NAME}${SERVICETYPE} and can be up
+ to 255 characters long. ${NAME} must be
+ at least 1 character long and can only consist
+ of letters, digits, ":", "-", "_", ".",
+ and "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ restart:
+ description: restart specifies the service to
+ restart, only valid if type is restart
+ properties:
+ serviceName:
+ description: serviceName is the full name
+ (e.g. crio.service) of the service to be
+ restarted. Service names should be of the
+ format ${NAME}${SERVICETYPE} and can be up
+ to 255 characters long. ${NAME} must be
+ at least 1 character long and can only consist
+ of letters, digits, ":", "-", "_", ".",
+ and "\". ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer", ".snapshot",
+ ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service
+ name. Expected format is ${NAME}${SERVICETYPE},
+ where ${SERVICETYPE} must be one of ".service",
+ ".socket", ".device", ".mount", ".automount",
+ ".swap", ".target", ".path", ".timer",".snapshot",
+ ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name.
+ Expected format is ${NAME}${SERVICETYPE},
+ where {NAME} must be atleast 1 character
+ long and can only consist of alphabets,
+ digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - serviceName
+ type: object
+ type:
+ description: type represents the commands that
+ will be carried out if this NodeDisruptionPolicyStatusActionType
+ is executed. Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration.
+ enum:
+ - Reboot
+ - Drain
+ - Reload
+ - Restart
+ - DaemonReload
+ - None
+ - Special
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: reload is required when type is Reload,
+ and forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Reload''
+ ? has(self.reload) : !has(self.reload)'
+ - message: restart is required when type is Restart,
+ and forbidden otherwise
+ rule: 'has(self.type) && self.type == ''Restart''
+ ? has(self.restart) : !has(self.restart)'
+ maxItems: 10
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: Reboot action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''Reboot'') ? size(self)
+ == 1 : true'
+ - message: None action can only be specified standalone,
+ as it will override any other actions
+ rule: 'self.exists(x, x.type==''None'') ? size(self)
+ == 1 : true'
+ name:
+ description: name represents the service name of a systemd
+ service managed through a MachineConfig. Actions specified
+ will be applied for changes to the named service.
+ Service names should be of the format ${NAME}${SERVICETYPE}
+ and can be up to 255 characters long. ${NAME} must be
+ at least 1 character long and can only consist of letters,
+ digits, ":", "-", "_", ".", and "\". ${SERVICETYPE}
+ must be one of ".service", ".socket", ".device", ".mount",
+ ".automount", ".swap", ".target", ".path", ".timer",
+ ".snapshot", ".slice" or ".scope".
+ maxLength: 255
+ type: string
+ x-kubernetes-validations:
+ - message: Invalid ${SERVICETYPE} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE}
+ must be one of ".service", ".socket", ".device",
+ ".mount", ".automount", ".swap", ".target", ".path",
+ ".timer",".snapshot", ".slice" or ".scope".
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')
+ - message: Invalid ${NAME} in service name. Expected
+ format is ${NAME}${SERVICETYPE}, where {NAME} must
+ be atleast 1 character long and can only consist
+ of alphabets, digits, ":", "-", "_", ".", and "\"
+ rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')
+ required:
+ - actions
+ - name
+ type: object
+ maxItems: 100
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ type: object
+ type: object
+ nodeStatuses:
+ description: nodeStatuses track the deployment values and errors across
+ individual nodes
+ items:
+ description: NodeStatus provides information about the current state
+ of a particular node managed by this operator.
+ properties:
+ currentRevision:
+ description: currentRevision is the generation of the most recently
+ successful deployment
+ format: int32
+ type: integer
+ lastFailedCount:
+ description: lastFailedCount is how often the installer pod
+ of the last failed revision failed.
+ type: integer
+ lastFailedReason:
+ description: lastFailedReason is a machine readable failure
+ reason string.
+ type: string
+ lastFailedRevision:
+ description: lastFailedRevision is the generation of the deployment
+ we tried and failed to deploy.
+ format: int32
+ type: integer
+ lastFailedRevisionErrors:
+ description: lastFailedRevisionErrors is a list of human readable
+ errors during the failed deployment referenced in lastFailedRevision.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ lastFailedTime:
+ description: lastFailedTime is the time the last failed revision
+ failed the last time.
+ format: date-time
+ type: string
+ lastFallbackCount:
+ description: lastFallbackCount is how often a fallback to a
+ previous revision happened.
+ type: integer
+ nodeName:
+ description: nodeName is the name of the node
+ type: string
+ targetRevision:
+ description: targetRevision is the generation of the deployment
+ we're trying to apply
+ format: int32
+ type: integer
+ required:
+ - nodeName
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - nodeName
+ x-kubernetes-list-type: map
+ observedGeneration:
+ description: observedGeneration is the last generation change you've
+ dealt with
+ format: int64
+ type: integer
+ readyReplicas:
+ description: readyReplicas indicates how many replicas are ready and
+ at the desired state
+ format: int32
+ type: integer
+ version:
+ description: version is the level this availability applies to
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/install/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
index 8bb4aa8715..bf7dab04ac 100644
--- a/install/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
+++ b/install/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
@@ -308,11 +308,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -448,11 +448,10 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
enum:
- Reboot
- Drain
@@ -581,11 +580,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -865,11 +864,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
@@ -1004,8 +1003,8 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
requires a corresponding service target specified
in the reload/restart field. Other values require
no further configuration
@@ -1140,11 +1139,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
diff --git a/install/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
index 51cbc46017..d35b0d2a7e 100644
--- a/install/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
+++ b/install/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
index f274fba11a..0360af3a04 100644
--- a/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
+++ b/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/install/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
index ffd58ed34e..bb45b00475 100644
--- a/install/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
+++ b/install/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/pkg/controller/container-runtime-config/container_runtime_config_controller.go b/pkg/controller/container-runtime-config/container_runtime_config_controller.go
index 4e760d9cad..cefb5b5a2f 100644
--- a/pkg/controller/container-runtime-config/container_runtime_config_controller.go
+++ b/pkg/controller/container-runtime-config/container_runtime_config_controller.go
@@ -13,6 +13,7 @@ import (
ign3types "github.com/coreos/ignition/v2/config/v3_4/types"
apicfgv1 "github.com/openshift/api/config/v1"
apicfgv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ features "github.com/openshift/api/features"
apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1"
configclientset "github.com/openshift/client-go/config/clientset/versioned"
configinformers "github.com/openshift/client-go/config/informers/externalversions"
@@ -337,7 +338,7 @@ func (ctrl *Controller) sigstoreAPIEnabled() bool {
klog.Infof("error getting current featuregates: %v", err)
return false
}
- return featureGates.Enabled(apicfgv1.FeatureGateSigstoreImageVerification)
+ return featureGates.Enabled(features.FeatureGateSigstoreImageVerification)
}
func (ctrl *Controller) updateContainerRuntimeConfig(oldObj, newObj interface{}) {
@@ -1099,7 +1100,7 @@ func RunImageBootstrap(templateDir string, controllerConfig *mcfgv1.ControllerCo
if err != nil {
return nil, err
}
- sigstoreAPIEnabled := featureGates.Enabled(apicfgv1.FeatureGateSigstoreImageVerification)
+ sigstoreAPIEnabled := featureGates.Enabled(features.FeatureGateSigstoreImageVerification)
if sigstoreAPIEnabled {
if clusterScopePolicies, err = getValidScopePolicies(clusterImagePolicies, controllerConfig.Spec.ReleaseImage, nil); err != nil {
return nil, err
diff --git a/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go b/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
index 303996487f..2970f3dc41 100644
--- a/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
+++ b/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
@@ -29,6 +29,7 @@ import (
ign3types "github.com/coreos/ignition/v2/config/v3_4/types"
apicfgv1 "github.com/openshift/api/config/v1"
apicfgv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ features "github.com/openshift/api/features"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1"
fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake"
@@ -88,12 +89,12 @@ func newFixture(t *testing.T) *fixture {
f.t = t
f.objects = []runtime.Object{}
f.fgAccess = featuregates.NewHardcodedFeatureGateAccess(
- []apicfgv1.FeatureGateName{apicfgv1.FeatureGateSigstoreImageVerification},
+ []apicfgv1.FeatureGateName{features.FeatureGateSigstoreImageVerification},
[]apicfgv1.FeatureGateName{
- apicfgv1.FeatureGateExternalCloudProvider,
- apicfgv1.FeatureGateExternalCloudProviderAzure,
- apicfgv1.FeatureGateExternalCloudProviderGCP,
- apicfgv1.FeatureGateExternalCloudProviderExternal,
+ features.FeatureGateExternalCloudProvider,
+ features.FeatureGateExternalCloudProviderAzure,
+ features.FeatureGateExternalCloudProviderGCP,
+ features.FeatureGateExternalCloudProviderExternal,
},
)
return f
@@ -1179,7 +1180,7 @@ func TestRunImageBootstrap(t *testing.T) {
// both registries.conf and policy.json as blocked
imgCfg := newImageConfig("cluster", &apicfgv1.RegistrySources{InsecureRegistries: []string{"insecure-reg-1.io", "insecure-reg-2.io"}, BlockedRegistries: []string{"blocked-reg.io", "release-reg.io"}, ContainerRuntimeSearchRegistries: []string{"search-reg.io"}})
// set FeatureGateSigstoreImageVerification enabled for testing
- fgAccess := featuregates.NewHardcodedFeatureGateAccess([]apicfgv1.FeatureGateName{apicfgv1.FeatureGateSigstoreImageVerification}, []apicfgv1.FeatureGateName{})
+ fgAccess := featuregates.NewHardcodedFeatureGateAccess([]apicfgv1.FeatureGateName{features.FeatureGateSigstoreImageVerification}, []apicfgv1.FeatureGateName{})
mcs, err := RunImageBootstrap("../../../templates", cc, pools, tc.icspRules, tc.idmsRules, tc.itmsRules, imgCfg, tc.clusterImagePolicies, fgAccess)
require.NoError(t, err)
diff --git a/pkg/controller/machine-set-boot-image/machine_set_boot_image_controller.go b/pkg/controller/machine-set-boot-image/machine_set_boot_image_controller.go
index 27b9c1288f..5c29b976da 100644
--- a/pkg/controller/machine-set-boot-image/machine_set_boot_image_controller.go
+++ b/pkg/controller/machine-set-boot-image/machine_set_boot_image_controller.go
@@ -49,6 +49,7 @@ import (
"github.com/coreos/stream-metadata-go/stream"
osconfigv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
)
// Controller defines the machine-set-boot-image controller.
@@ -155,10 +156,10 @@ func New(
prevfeatureEnabled = false
} else {
prevfeatureEnabled = featuregates.NewFeatureGate(featureChange.Previous.Enabled, featureChange.Previous.Disabled).
- Enabled(osconfigv1.FeatureGateManagedBootImages)
+ Enabled(features.FeatureGateManagedBootImages)
}
ctrl.featureEnabled = featuregates.NewFeatureGate(featureChange.New.Enabled, featureChange.New.Disabled).
- Enabled(osconfigv1.FeatureGateManagedBootImages)
+ Enabled(features.FeatureGateManagedBootImages)
if !prevfeatureEnabled && ctrl.featureEnabled {
klog.Info("Trigger a sync as this feature was turned on")
ctrl.enqueueMAPIMachineSets()
diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go
index 76c3b11060..224df1ed8f 100644
--- a/pkg/controller/node/node_controller_test.go
+++ b/pkg/controller/node/node_controller_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ features "github.com/openshift/api/features"
mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
@@ -84,7 +85,7 @@ func newFixture(t *testing.T) *fixture {
f.kubeobjects = []runtime.Object{}
f.fgAccess = featuregates.NewHardcodedFeatureGateAccess(
[]configv1.FeatureGateName{
- configv1.FeatureGatePinnedImages,
+ features.FeatureGatePinnedImages,
},
[]configv1.FeatureGateName{},
)
diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go
index c2f3a97204..3644ca09a2 100644
--- a/pkg/controller/node/status.go
+++ b/pkg/controller/node/status.go
@@ -5,10 +5,9 @@ import (
"fmt"
"strings"
- configv1 "github.com/openshift/api/config/v1"
- "github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
-
+ features "github.com/openshift/api/features"
mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
+ "github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
helpers "github.com/openshift/machine-config-operator/pkg/helpers"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
@@ -38,13 +37,13 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error {
list := fg.KnownFeatures()
mcnExists := false
for _, feature := range list {
- if feature == configv1.FeatureGateMachineConfigNodes {
+ if feature == features.FeatureGateMachineConfigNodes {
mcnExists = true
}
}
if err != nil {
klog.Errorf("Could not get FG: %v", err)
- } else if mcnExists && fg.Enabled(configv1.FeatureGateMachineConfigNodes) {
+ } else if mcnExists && fg.Enabled(features.FeatureGateMachineConfigNodes) {
for _, node := range nodes {
ms, err := ctrl.client.MachineconfigurationV1alpha1().MachineConfigNodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
@@ -116,7 +115,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf
// not ready yet
break
}
- if fg.Enabled(configv1.FeatureGatePinnedImages) {
+ if fg.Enabled(features.FeatureGatePinnedImages) {
if isPinnedImageSetNodeUpdating(state) {
updatingPinnedImageSetMachines++
}
@@ -125,7 +124,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf
if strings.Contains(cond.Message, "Error:") {
degradedMachines = append(degradedMachines, ourNode)
// populate the degradedReasons from the MachineConfigNodePinnedImageSetsDegraded condition
- if fg.Enabled(configv1.FeatureGatePinnedImages) {
+ if fg.Enabled(features.FeatureGatePinnedImages) {
if mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded && cond.Status == metav1.ConditionTrue {
degradedReasons = append(degradedReasons, fmt.Sprintf("Node %s is reporting: %q", ourNode.Name, cond.Message))
}
@@ -200,7 +199,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf
}
// update synchronizer status for pinned image sets
- if fg.Enabled(configv1.FeatureGatePinnedImages) {
+ if fg.Enabled(features.FeatureGatePinnedImages) {
// TODO: update counts to be more granular
status.PoolSynchronizersStatus = []mcfgv1.PoolSynchronizerStatus{
{
@@ -267,7 +266,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf
// set Degraded. For now, the node_controller understand NodeDegraded & RenderDegraded = Degraded.
pinnedImageSetsDegraded := false
- if fg.Enabled(configv1.FeatureGatePinnedImages) {
+ if fg.Enabled(features.FeatureGatePinnedImages) {
pinnedImageSetsDegraded = apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded)
}
diff --git a/pkg/controller/node/status_test.go b/pkg/controller/node/status_test.go
index 2a4b8dad87..fdac7f9bdd 100644
--- a/pkg/controller/node/status_test.go
+++ b/pkg/controller/node/status_test.go
@@ -5,6 +5,7 @@ import (
"reflect"
"testing"
+ features "github.com/openshift/api/features"
mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
apicfgv1 "github.com/openshift/api/config/v1"
@@ -942,8 +943,8 @@ func TestCalculateStatus(t *testing.T) {
}
fgAccess := featuregates.NewHardcodedFeatureGateAccess(
[]apicfgv1.FeatureGateName{
- apicfgv1.FeatureGateMachineConfigNodes,
- apicfgv1.FeatureGatePinnedImages,
+ features.FeatureGateMachineConfigNodes,
+ features.FeatureGatePinnedImages,
},
[]apicfgv1.FeatureGateName{},
)
diff --git a/pkg/daemon/update.go b/pkg/daemon/update.go
index cccd3dd3b4..5ec76a396a 100644
--- a/pkg/daemon/update.go
+++ b/pkg/daemon/update.go
@@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
- v1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
@@ -1047,7 +1047,7 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi
var nodeDisruptionError error
var actions []string
// If FeatureGateNodeDisruptionPolicy is set, calculate NodeDisruptionPolicy based actions for this MC diff
- if fg != nil && fg.Enabled(v1.FeatureGateNodeDisruptionPolicy) {
+ if fg != nil && fg.Enabled(features.FeatureGateNodeDisruptionPolicy) {
nodeDisruptionActions, nodeDisruptionError = dn.calculatePostConfigChangeNodeDisruptionAction(diff, diffFileSet, diffUnitSet)
if nodeDisruptionError != nil {
// TODO: Fallback to legacy path and signal failure here
@@ -1075,7 +1075,7 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi
}
var drain bool
- if fg != nil && fg.Enabled(v1.FeatureGateNodeDisruptionPolicy) && nodeDisruptionError == nil {
+ if fg != nil && fg.Enabled(features.FeatureGateNodeDisruptionPolicy) && nodeDisruptionError == nil {
// Check actions list and perform node drain if required
drain, err = isDrainRequiredForNodeDisruptionActions(nodeDisruptionActions, oldIgnConfig, newIgnConfig)
if err != nil {
@@ -1277,7 +1277,7 @@ func (dn *Daemon) update(oldConfig, newConfig *mcfgv1.MachineConfig, skipCertifi
klog.Errorf("Error making MCN for Updated Files and OS: %v", err)
}
- if fg != nil && fg.Enabled(v1.FeatureGateNodeDisruptionPolicy) && nodeDisruptionError == nil {
+ if fg != nil && fg.Enabled(features.FeatureGateNodeDisruptionPolicy) && nodeDisruptionError == nil {
return dn.performPostConfigChangeNodeDisruptionAction(nodeDisruptionActions, newConfig.GetName())
}
// If we're here, FeatureGateNodeDisruptionPolicy is off/errored, so perform legacy action
diff --git a/pkg/daemon/upgrade_monitor_test.go b/pkg/daemon/upgrade_monitor_test.go
index 636ab381e7..0dd246fd51 100644
--- a/pkg/daemon/upgrade_monitor_test.go
+++ b/pkg/daemon/upgrade_monitor_test.go
@@ -7,6 +7,7 @@ import (
apicfgv1 "github.com/openshift/api/config/v1"
"github.com/openshift/machine-config-operator/pkg/upgrademonitor"
+ features "github.com/openshift/api/features"
"github.com/openshift/api/machineconfiguration/v1alpha1"
"github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake"
informers "github.com/openshift/client-go/machineconfiguration/informers/externalversions"
@@ -109,13 +110,13 @@ func (tc upgradeMonitorTestCase) run(t *testing.T) {
f.oclient = mcopfake.NewSimpleClientset(f.objects...)
fgAccess := featuregates.NewHardcodedFeatureGateAccess(
[]apicfgv1.FeatureGateName{
- apicfgv1.FeatureGateMachineConfigNodes,
+ features.FeatureGateMachineConfigNodes,
},
[]apicfgv1.FeatureGateName{
- apicfgv1.FeatureGateExternalCloudProvider,
- apicfgv1.FeatureGateExternalCloudProviderAzure,
- apicfgv1.FeatureGateExternalCloudProviderGCP,
- apicfgv1.FeatureGateExternalCloudProviderExternal,
+ features.FeatureGateExternalCloudProvider,
+ features.FeatureGateExternalCloudProviderAzure,
+ features.FeatureGateExternalCloudProviderGCP,
+ features.FeatureGateExternalCloudProviderExternal,
},
)
diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go
index 3f258fc3c0..89e61b1a9d 100644
--- a/pkg/operator/operator_test.go
+++ b/pkg/operator/operator_test.go
@@ -5,6 +5,7 @@ import (
"testing"
configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
fakeconfigclientset "github.com/openshift/client-go/config/clientset/versioned/fake"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
@@ -24,7 +25,7 @@ func TestMetrics(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
diff --git a/pkg/operator/status.go b/pkg/operator/status.go
index 2cd0fa4322..69d37ad29c 100644
--- a/pkg/operator/status.go
+++ b/pkg/operator/status.go
@@ -12,6 +12,7 @@ import (
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
corev1 "k8s.io/api/core/v1"
@@ -828,7 +829,7 @@ func machineConfigPoolStatus(fg featuregates.FeatureGate, pool *mcfgv1.MachineCo
case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolUpdating):
return fmt.Sprintf("%d (ready %d) out of %d nodes are updating to latest configuration %s", pool.Status.UpdatedMachineCount, pool.Status.ReadyMachineCount, pool.Status.MachineCount, pool.Spec.Configuration.Name)
default:
- if fg.Enabled(configv1.FeatureGatePinnedImages) {
+ if fg.Enabled(features.FeatureGatePinnedImages) {
if apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) {
cond := apihelpers.GetMachineConfigPoolCondition(pool.Status, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded)
return fmt.Sprintf("pool is degraded because pinned image sets failed with %q: %q", cond.Reason, cond.Message)
diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go
index 45f2235cf3..9fc5cddafb 100644
--- a/pkg/operator/status_test.go
+++ b/pkg/operator/status_test.go
@@ -21,6 +21,7 @@ import (
apicfgv1 "github.com/openshift/api/config/v1"
configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
fakeconfigclientset "github.com/openshift/client-go/config/clientset/versioned/fake"
cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
@@ -193,8 +194,8 @@ func TestIsMachineConfigPoolConfigurationValid(t *testing.T) {
fgAccess := featuregates.NewHardcodedFeatureGateAccess(
[]apicfgv1.FeatureGateName{
- apicfgv1.FeatureGateMachineConfigNodes,
- apicfgv1.FeatureGatePinnedImages,
+ features.FeatureGateMachineConfigNodes,
+ features.FeatureGatePinnedImages,
},
[]apicfgv1.FeatureGateName{},
)
@@ -641,7 +642,7 @@ func TestOperatorSyncStatus(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
@@ -715,7 +716,7 @@ func TestInClusterBringUpStayOnErr(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
@@ -780,7 +781,7 @@ func TestKubeletSkewUnSupported(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
@@ -870,7 +871,7 @@ func TestCustomPoolKubeletSkewUnSupported(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
@@ -960,7 +961,7 @@ func TestKubeletSkewSupported(t *testing.T) {
optr := &Operator{
eventRecorder: &record.FakeRecorder{},
fgAccessor: featuregates.NewHardcodedFeatureGateAccess(
- []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
+ []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{},
),
}
optr.vStore = newVersionStore()
diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go
index edfcdecb1b..44f42d3272 100644
--- a/pkg/operator/sync.go
+++ b/pkg/operator/sync.go
@@ -38,6 +38,7 @@ import (
v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
opv1 "github.com/openshift/api/operator/v1"
+ features "github.com/openshift/api/features"
mcoac "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
@@ -697,7 +698,7 @@ func (optr *Operator) syncMachineConfigNodes(_ *renderConfig) error {
klog.Errorf("Could not get fg: %v", err)
return err
}
- if !fg.Enabled(configv1.FeatureGateMachineConfigNodes) {
+ if !fg.Enabled(features.FeatureGateMachineConfigNodes) {
return nil
}
nodes, err := optr.nodeLister.List(labels.Everything())
@@ -899,7 +900,7 @@ func (optr *Operator) applyManifests(config *renderConfig, paths manifestPaths)
}
// Only sync validatingadmissionpolicy manifests if ValidatingAdmissionPolicy feature gate is enabled
- if fg.Enabled(configv1.FeatureGateValidatingAdmissionPolicy) {
+ if fg.Enabled(features.FeatureGateValidatingAdmissionPolicy) {
// These new apply functions have a resource cache in case there are duplicate CRs
noCache := resourceapply.NewResourceCache()
@@ -1192,7 +1193,7 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error {
}
// Check if OnClusterBuild feature gate is enabled
- if !fg.Enabled(configv1.FeatureGateOnClusterBuild) {
+ if !fg.Enabled(features.FeatureGateOnClusterBuild) {
return nil
}
@@ -2009,7 +2010,7 @@ func (optr *Operator) syncMachineConfiguration(_ *renderConfig) error {
}
// If FeatureGateNodeDisruptionPolicy feature gate is not enabled, no updates will need to be done for the MachineConfiguration object.
- if !fg.Enabled(configv1.FeatureGateNodeDisruptionPolicy) {
+ if !fg.Enabled(features.FeatureGateNodeDisruptionPolicy) {
return nil
}
diff --git a/pkg/upgrademonitor/upgrade_monitor.go b/pkg/upgrademonitor/upgrade_monitor.go
index 8c1a2ea830..24041378d2 100644
--- a/pkg/upgrademonitor/upgrade_monitor.go
+++ b/pkg/upgrademonitor/upgrade_monitor.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ features "github.com/openshift/api/features"
machineconfigurationalphav1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1"
mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
@@ -11,7 +12,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/utils/ptr"
- v1 "github.com/openshift/api/config/v1"
mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
@@ -75,7 +75,7 @@ func generateAndApplyMachineConfigNodes(
klog.Errorf("Could not get fg: %v", err)
return err
}
- if fg == nil || !fg.Enabled(v1.FeatureGateMachineConfigNodes) {
+ if fg == nil || !fg.Enabled(features.FeatureGateMachineConfigNodes) {
return nil
}
@@ -303,7 +303,7 @@ func GenerateAndApplyMachineConfigNodeSpec(fgAccessor featuregates.FeatureGateAc
klog.Errorf("Could not get fg: %v", err)
return err
}
- if fg == nil || !fg.Enabled(v1.FeatureGateMachineConfigNodes) {
+ if fg == nil || !fg.Enabled(features.FeatureGateMachineConfigNodes) {
klog.Infof("MCN Featuregate is not enabled. Please enable the TechPreviewNoUpgrade featureset to use MachineConfigNodes")
return nil
}
diff --git a/test/e2e-bootstrap/bootstrap_test.go b/test/e2e-bootstrap/bootstrap_test.go
index 89d07c7b7d..677922f432 100644
--- a/test/e2e-bootstrap/bootstrap_test.go
+++ b/test/e2e-bootstrap/bootstrap_test.go
@@ -15,10 +15,12 @@ import (
configv1 "github.com/openshift/api/config/v1"
_ "github.com/openshift/api/config/v1/zz_generated.crd-manifests"
configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ features "github.com/openshift/api/features"
mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1"
_ "github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests"
- featuregatescontroller "github.com/openshift/cluster-config-operator/pkg/operator/featuregates"
+ featuregatescontroller "github.com/openshift/api/payload-command/render"
+
"github.com/openshift/machine-config-operator/internal/clients"
"github.com/openshift/machine-config-operator/pkg/controller/bootstrap"
ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
@@ -574,18 +576,14 @@ func ensureFeatureGate(t *testing.T, clientSet *framework.ClientSet, objs ...run
currentFeatureSet := currentFg.Spec.FeatureSet
- SelfManaged := configv1.ClusterProfileName("include.release.openshift.io/self-managed-high-availability")
+ SelfManaged := features.ClusterProfileName("include.release.openshift.io/self-managed-high-availability")
if err != nil {
t.Fatalf("Error retrieving current feature gates: %v", err)
}
- featureGateEnabledDisabled, err := configv1.FeatureSets(configv1.ClusterProfileName(SelfManaged), currentFeatureSet)
- require.NoError(t, err)
+ featureGateStatus, err := features.FeatureSets(features.ClusterProfileName(SelfManaged), currentFeatureSet)
- featureSetMap := map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled{
- currentFeatureSet: featureGateEnabledDisabled,
- }
- currentDetails, err := featuregatescontroller.FeaturesGateDetailsFromFeatureSets(featureSetMap, currentFg, controllerConfig.Spec.ReleaseImage)
require.NoError(t, err)
+ currentDetails := featuregatescontroller.FeaturesGateDetailsFromFeatureSets(featureGateStatus, controllerConfig.Spec.ReleaseImage)
rawDetails := *currentDetails
rawDetails.Version = version.ReleaseVersion
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
index ef2c0cc141..1e03171961 100644
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -148,8 +148,3 @@ type FeatureGateList struct {
Items []FeatureGate `json:"items"`
}
-
-type FeatureGateEnabledDisabled struct {
- Enabled []FeatureGateDescription
- Disabled []FeatureGateDescription
-}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index c80e66c431..9a81bc559c 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -2039,23 +2039,6 @@ func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) {
- *out = *in
- out.FeatureGateAttributes = in.FeatureGateAttributes
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription.
-func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription {
- if in == nil {
- return nil
- }
- out := new(FeatureGateDescription)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) {
*out = *in
@@ -2082,32 +2065,6 @@ func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
- *out = *in
- if in.Enabled != nil {
- in, out := &in.Enabled, &out.Enabled
- *out = make([]FeatureGateDescription, len(*in))
- copy(*out, *in)
- }
- if in.Disabled != nil {
- in, out := &in.Disabled, &out.Disabled
- *out = make([]FeatureGateDescription, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
-func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
- if in == nil {
- return nil
- }
- out := new(FeatureGateEnabledDisabled)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
*out = *in
diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md
index 6d3bc87896..c32db0ea35 100644
--- a/vendor/github.com/openshift/api/features.md
+++ b/vendor/github.com/openshift/api/features.md
@@ -1,6 +1,14 @@
| FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA |
| ------ | --- | --- | --- | --- | --- | --- |
| ClusterAPIInstall| | | | | | |
+| ClusterAPIInstallAWS| | | | | | |
+| ClusterAPIInstallAzure| | | | | | |
+| ClusterAPIInstallGCP| | | | | | |
+| ClusterAPIInstallIBMCloud| | | | | | |
+| ClusterAPIInstallNutanix| | | | | | |
+| ClusterAPIInstallOpenStack| | | | | | |
+| ClusterAPIInstallPowerVS| | | | | | |
+| ClusterAPIInstallVSphere| | | | | | |
| EventedPLEG| | | | | | |
| MachineAPIOperatorDisableMachineHealthCheckController| | | | | | |
| AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled |
@@ -23,6 +31,7 @@
| ManagedBootImages| | | Enabled | Enabled | Enabled | Enabled |
| MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled |
| MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled |
+| MetricsServer| | | Enabled | Enabled | Enabled | Enabled |
| MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled |
| NetworkDiagnosticsConfig| | | Enabled | Enabled | Enabled | Enabled |
| NewOLM| | | Enabled | Enabled | Enabled | Enabled |
@@ -55,7 +64,6 @@
| ExternalCloudProviderExternal| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
| ExternalCloudProviderGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
| KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| MetricsServer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/features/features.go
similarity index 61%
rename from vendor/github.com/openshift/api/config/v1/feature_gates.go
rename to vendor/github.com/openshift/api/features/features.go
index 737c4e3221..2bfe032bb0 100644
--- a/vendor/github.com/openshift/api/config/v1/feature_gates.go
+++ b/vendor/github.com/openshift/api/features/features.go
@@ -1,147 +1,12 @@
-package v1
-
-import "fmt"
-
-// FeatureGateDescription is a golang-only interface used to contains details for a feature gate.
-type FeatureGateDescription struct {
- // FeatureGateAttributes is the information that appears in the API
- FeatureGateAttributes FeatureGateAttributes
-
- // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug.
- // This is the team that owns the feature long term.
- OwningJiraComponent string
- // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead.
- // It is someone who can make the promise on the behalf of the team.
- ResponsiblePerson string
- // OwningProduct is the product that owns the lifecycle of the gate.
- OwningProduct OwningProduct
-}
+package features
-type ClusterProfileName string
+import (
+ "fmt"
-var (
- Hypershift = ClusterProfileName("include.release.openshift.io/ibm-cloud-managed")
- SelfManaged = ClusterProfileName("include.release.openshift.io/self-managed-high-availability")
- AllClusterProfiles = []ClusterProfileName{Hypershift, SelfManaged}
+ configv1 "github.com/openshift/api/config/v1"
)
-type OwningProduct string
-
-var (
- ocpSpecific = OwningProduct("OCP")
- kubernetes = OwningProduct("Kubernetes")
-)
-
-type featureGateBuilder struct {
- name string
- owningJiraComponent string
- responsiblePerson string
- owningProduct OwningProduct
-
- statusByClusterProfileByFeatureSet map[ClusterProfileName]map[FeatureSet]bool
-}
-
-// newFeatureGate featuregate are disabled in every FeatureSet and selectively enabled
-func newFeatureGate(name string) *featureGateBuilder {
- b := &featureGateBuilder{
- name: name,
- statusByClusterProfileByFeatureSet: map[ClusterProfileName]map[FeatureSet]bool{},
- }
- for _, clusterProfile := range AllClusterProfiles {
- byFeatureSet := map[FeatureSet]bool{}
- for _, featureSet := range AllFixedFeatureSets {
- byFeatureSet[featureSet] = false
- }
- b.statusByClusterProfileByFeatureSet[clusterProfile] = byFeatureSet
- }
- return b
-}
-
-func (b *featureGateBuilder) reportProblemsToJiraComponent(owningJiraComponent string) *featureGateBuilder {
- b.owningJiraComponent = owningJiraComponent
- return b
-}
-
-func (b *featureGateBuilder) contactPerson(responsiblePerson string) *featureGateBuilder {
- b.responsiblePerson = responsiblePerson
- return b
-}
-
-func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureGateBuilder {
- b.owningProduct = owningProduct
- return b
-}
-
-func (b *featureGateBuilder) enableIn(featureSets ...FeatureSet) *featureGateBuilder {
- for clusterProfile := range b.statusByClusterProfileByFeatureSet {
- for _, featureSet := range featureSets {
- b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true
- }
- }
- return b
-}
-
-func (b *featureGateBuilder) enableForClusterProfile(clusterProfile ClusterProfileName, featureSets ...FeatureSet) *featureGateBuilder {
- for _, featureSet := range featureSets {
- b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true
- }
- return b
-}
-
-func (b *featureGateBuilder) register() (FeatureGateName, error) {
- if len(b.name) == 0 {
- return "", fmt.Errorf("missing name")
- }
- if len(b.owningJiraComponent) == 0 {
- return "", fmt.Errorf("missing owningJiraComponent")
- }
- if len(b.responsiblePerson) == 0 {
- return "", fmt.Errorf("missing responsiblePerson")
- }
- if len(b.owningProduct) == 0 {
- return "", fmt.Errorf("missing owningProduct")
- }
-
- featureGateName := FeatureGateName(b.name)
- description := FeatureGateDescription{
- FeatureGateAttributes: FeatureGateAttributes{
- Name: featureGateName,
- },
- OwningJiraComponent: b.owningJiraComponent,
- ResponsiblePerson: b.responsiblePerson,
- OwningProduct: b.owningProduct,
- }
-
- // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination
- for clusterProfile, byFeatureSet := range b.statusByClusterProfileByFeatureSet {
- for featureSet, enabled := range byFeatureSet {
- if _, ok := allFeatureGates[clusterProfile]; !ok {
- allFeatureGates[clusterProfile] = map[FeatureSet]*FeatureGateEnabledDisabled{}
- }
- if _, ok := allFeatureGates[clusterProfile][featureSet]; !ok {
- allFeatureGates[clusterProfile][featureSet] = &FeatureGateEnabledDisabled{}
- }
-
- if enabled {
- allFeatureGates[clusterProfile][featureSet].Enabled = append(allFeatureGates[clusterProfile][featureSet].Enabled, description)
- } else {
- allFeatureGates[clusterProfile][featureSet].Disabled = append(allFeatureGates[clusterProfile][featureSet].Disabled, description)
- }
- }
- }
-
- return featureGateName, nil
-}
-
-func (b *featureGateBuilder) mustRegister() FeatureGateName {
- ret, err := b.register()
- if err != nil {
- panic(err)
- }
- return ret
-}
-
-func FeatureSets(clusterProfile ClusterProfileName, featureSet FeatureSet) (*FeatureGateEnabledDisabled, error) {
+func FeatureSets(clusterProfile ClusterProfileName, featureSet configv1.FeatureSet) (*FeatureGateEnabledDisabled, error) {
byFeatureSet, ok := allFeatureGates[clusterProfile]
if !ok {
return nil, fmt.Errorf("no information found for ClusterProfile=%q", clusterProfile)
@@ -153,11 +18,11 @@ func FeatureSets(clusterProfile ClusterProfileName, featureSet FeatureSet) (*Fea
return featureGates.DeepCopy(), nil
}
-func AllFeatureSets() map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled {
- ret := map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{}
+func AllFeatureSets() map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled {
+ ret := map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{}
for clusterProfile, byFeatureSet := range allFeatureGates {
- newByFeatureSet := map[FeatureSet]*FeatureGateEnabledDisabled{}
+ newByFeatureSet := map[configv1.FeatureSet]*FeatureGateEnabledDisabled{}
for featureSet, enabledDisabled := range byFeatureSet {
newByFeatureSet[featureSet] = enabledDisabled.DeepCopy()
@@ -169,132 +34,132 @@ func AllFeatureSets() map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledD
}
var (
- allFeatureGates = map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{}
+ allFeatureGates = map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{}
FeatureGateServiceAccountTokenNodeBindingValidation = newFeatureGate("ServiceAccountTokenNodeBindingValidation").
reportProblemsToJiraComponent("apiserver-auth").
contactPerson("stlaz").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding").
reportProblemsToJiraComponent("apiserver-auth").
contactPerson("stlaz").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateServiceAccountTokenPodNodeInfo = newFeatureGate("ServiceAccountTokenPodNodeInfo").
reportProblemsToJiraComponent("apiserver-auth").
contactPerson("stlaz").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy").
reportProblemsToJiraComponent("kube-apiserver").
contactPerson("benluddy").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateGatewayAPI = newFeatureGate("GatewayAPI").
reportProblemsToJiraComponent("Routing").
contactPerson("miciah").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission").
reportProblemsToJiraComponent("auth").
contactPerson("stlaz").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalCloudProvider = newFeatureGate("ExternalCloudProvider").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("jspeed").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalCloudProviderAzure = newFeatureGate("ExternalCloudProviderAzure").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("jspeed").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalCloudProviderGCP = newFeatureGate("ExternalCloudProviderGCP").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("jspeed").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalCloudProviderExternal = newFeatureGate("ExternalCloudProviderExternal").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("elmiko").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateCSIDriverSharedResource = newFeatureGate("CSIDriverSharedResource").
reportProblemsToJiraComponent("builds").
contactPerson("adkaplan").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes").
reportProblemsToJiraComponent("builds").
contactPerson("adkaplan").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateNodeSwap = newFeatureGate("NodeSwap").
reportProblemsToJiraComponent("node").
contactPerson("ehashman").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack").
reportProblemsToJiraComponent("openstack").
contactPerson("egarcia").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI").
reportProblemsToJiraComponent("insights").
contactPerson("tremes").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation").
reportProblemsToJiraComponent("scheduling").
contactPerson("jchaloup").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity").
reportProblemsToJiraComponent("cloud-credential-operator").
contactPerson("abutcher").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet").
reportProblemsToJiraComponent("apps").
contactPerson("atiratree").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateEventedPLEG = newFeatureGate("EventedPLEG").
@@ -307,84 +172,84 @@ var (
reportProblemsToJiraComponent("Routing").
contactPerson("miciah").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification").
reportProblemsToJiraComponent("node").
contactPerson("sgrunert").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags").
reportProblemsToJiraComponent("Installer").
contactPerson("bhb").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("jspeed").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs").
reportProblemsToJiraComponent("machine-config-operator/platform-baremetal").
contactPerson("mkowalsk").
productScope(kubernetes).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs").
reportProblemsToJiraComponent("splat").
contactPerson("rvanderp3").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate").
reportProblemsToJiraComponent("router").
contactPerson("thejasn").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy").
reportProblemsToJiraComponent("Networking/ovn-kubernetes").
contactPerson("tssurya").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration").
reportProblemsToJiraComponent("Networking/ovn-kubernetes").
contactPerson("pliu").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig").
reportProblemsToJiraComponent("Networking/cluster-network-operator").
contactPerson("kyrtapz").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed").
reportProblemsToJiraComponent("etcd").
contactPerson("hasbro17").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateAutomatedEtcdBackup = newFeatureGate("AutomatedEtcdBackup").
reportProblemsToJiraComponent("etcd").
contactPerson("hasbro17").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = newFeatureGate("MachineAPIOperatorDisableMachineHealthCheckController").
@@ -397,21 +262,21 @@ var (
reportProblemsToJiraComponent("dns").
contactPerson("miciah").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet").
reportProblemsToJiraComponent("splat").
contactPerson("rvanderp3").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes").
reportProblemsToJiraComponent("MachineConfigOperator").
contactPerson("cdoern").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall").
@@ -424,175 +289,223 @@ var (
reportProblemsToJiraComponent("Monitoring").
contactPerson("slashpai").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateInstallAlternateInfrastructureAWS = newFeatureGate("InstallAlternateInfrastructureAWS").
reportProblemsToJiraComponent("Installer").
contactPerson("padillon").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS").
reportProblemsToJiraComponent("Installer").
contactPerson("barbacbd").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation").
reportProblemsToJiraComponent("NodeTuningOperator").
contactPerson("titzhak").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateManagedBootImages = newFeatureGate("ManagedBootImages").
reportProblemsToJiraComponent("MachineConfigOperator").
contactPerson("djoshy").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders").
reportProblemsToJiraComponent("cloud-provider").
contactPerson("jspeed").
productScope(kubernetes).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild").
reportProblemsToJiraComponent("MachineConfigOperator").
contactPerson("dkhater").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateSignatureStores = newFeatureGate("SignatureStores").
reportProblemsToJiraComponent("Cluster Version Operator").
contactPerson("lmohanty").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateKMSv1 = newFeatureGate("KMSv1").
reportProblemsToJiraComponent("kube-apiserver").
contactPerson("dgrisonnet").
productScope(kubernetes).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGatePinnedImages = newFeatureGate("PinnedImages").
reportProblemsToJiraComponent("MachineConfigOperator").
contactPerson("jhernand").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus").
reportProblemsToJiraComponent("Cluster Version Operator").
contactPerson("pmuller").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests").
reportProblemsToJiraComponent("kube-apiserver").
contactPerson("akashem").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot").
reportProblemsToJiraComponent("Storage / Kubernetes External Components").
contactPerson("fbertina").
productScope(kubernetes).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC").
reportProblemsToJiraComponent("authentication").
contactPerson("stlaz").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
- enableForClusterProfile(Hypershift, Default, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+ enableForClusterProfile(Hypershift, configv1.Default, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExample = newFeatureGate("Example").
reportProblemsToJiraComponent("cluster-config").
contactPerson("deads").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGatePlatformOperators = newFeatureGate("PlatformOperators").
reportProblemsToJiraComponent("olm").
contactPerson("joe").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateNewOLM = newFeatureGate("NewOLM").
reportProblemsToJiraComponent("olm").
contactPerson("joe").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateExternalRouteCertificate = newFeatureGate("ExternalRouteCertificate").
reportProblemsToJiraComponent("network-edge").
contactPerson("miciah").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather").
reportProblemsToJiraComponent("insights").
contactPerson("tremes").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer").
reportProblemsToJiraComponent("metal").
contactPerson("EmilienM").
productScope(ocpSpecific).
- enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateInsightsConfig = newFeatureGate("InsightsConfig").
reportProblemsToJiraComponent("insights").
contactPerson("tremes").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateImagePolicy = newFeatureGate("ImagePolicy").
reportProblemsToJiraComponent("node").
contactPerson("rphillips").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy").
reportProblemsToJiraComponent("MachineConfigOperator").
contactPerson("jerzhang").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles").
reportProblemsToJiraComponent("Monitoring").
contactPerson("rexagod").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
mustRegister()
FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration").
reportProblemsToJiraComponent("Storage / Kubernetes External Components").
contactPerson("rbednar").
productScope(ocpSpecific).
- enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallAWS = newFeatureGate("ClusterAPIInstallAWS").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("r4f4").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallAzure = newFeatureGate("ClusterAPIInstallAzure").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("jhixson74").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallGCP = newFeatureGate("ClusterAPIInstallGCP").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("bfournie").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallIBMCloud = newFeatureGate("ClusterAPIInstallIBMCloud").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("cjschaef").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallNutanix = newFeatureGate("ClusterAPIInstallNutanix").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("yanhua121").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallOpenStack = newFeatureGate("ClusterAPIInstallOpenStack").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("stephenfin").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallPowerVS = newFeatureGate("ClusterAPIInstallPowerVS").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("mjturek").
+ productScope(ocpSpecific).
+ mustRegister()
+
+ FeatureGateClusterAPIInstallVSphere = newFeatureGate("ClusterAPIInstallVSphere").
+ reportProblemsToJiraComponent("Installer").
+ contactPerson("rvanderp3").
+ productScope(ocpSpecific).
mustRegister()
)
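
The var block above keeps every gate declaration in the new features package, while the builder plumbing moves to util.go (the next file in this patch). As a minimal sketch of how the registry can be consumed, assuming only the exported names visible in this diff (FeatureSets, SelfManaged, FeatureGateEnabledDisabled); the main function and the printing are illustration only, not code from the patch:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/api/features"
)

func main() {
	// Look up the enabled/disabled gate descriptions for the self-managed
	// profile under the TechPreviewNoUpgrade feature set.
	fs, err := features.FeatureSets(features.SelfManaged, configv1.TechPreviewNoUpgrade)
	if err != nil {
		panic(err)
	}
	for _, gate := range fs.Enabled {
		fmt.Println("enabled:", gate.FeatureGateAttributes.Name)
	}
	for _, gate := range fs.Disabled {
		fmt.Println("disabled:", gate.FeatureGateAttributes.Name)
	}
}
```
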
diff --git a/vendor/github.com/openshift/api/features/util.go b/vendor/github.com/openshift/api/features/util.go
new file mode 100644
index 0000000000..d8d8e94a0e
--- /dev/null
+++ b/vendor/github.com/openshift/api/features/util.go
@@ -0,0 +1,193 @@
+package features
+
+import (
+ "fmt"
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// FeatureGateDescription is a golang-only interface used to contain details for a feature gate.
+type FeatureGateDescription struct {
+ // FeatureGateAttributes is the information that appears in the API
+ FeatureGateAttributes configv1.FeatureGateAttributes
+
+ // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug.
+ // This is the team that owns the feature long term.
+ OwningJiraComponent string
+ // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead.
+ // It is someone who can make the promise on the behalf of the team.
+ ResponsiblePerson string
+ // OwningProduct is the product that owns the lifecycle of the gate.
+ OwningProduct OwningProduct
+}
+
+type FeatureGateEnabledDisabled struct {
+ Enabled []FeatureGateDescription
+ Disabled []FeatureGateDescription
+}
+
+type ClusterProfileName string
+
+var (
+ Hypershift = ClusterProfileName("include.release.openshift.io/ibm-cloud-managed")
+ SelfManaged = ClusterProfileName("include.release.openshift.io/self-managed-high-availability")
+ AllClusterProfiles = []ClusterProfileName{Hypershift, SelfManaged}
+)
+
+type OwningProduct string
+
+var (
+ ocpSpecific = OwningProduct("OCP")
+ kubernetes = OwningProduct("Kubernetes")
+)
+
+type featureGateBuilder struct {
+ name string
+ owningJiraComponent string
+ responsiblePerson string
+ owningProduct OwningProduct
+
+ statusByClusterProfileByFeatureSet map[ClusterProfileName]map[configv1.FeatureSet]bool
+}
+
+// newFeatureGate creates a builder; feature gates are disabled in every FeatureSet and selectively enabled
+func newFeatureGate(name string) *featureGateBuilder {
+ b := &featureGateBuilder{
+ name: name,
+ statusByClusterProfileByFeatureSet: map[ClusterProfileName]map[configv1.FeatureSet]bool{},
+ }
+ for _, clusterProfile := range AllClusterProfiles {
+ byFeatureSet := map[configv1.FeatureSet]bool{}
+ for _, featureSet := range configv1.AllFixedFeatureSets {
+ byFeatureSet[featureSet] = false
+ }
+ b.statusByClusterProfileByFeatureSet[clusterProfile] = byFeatureSet
+ }
+ return b
+}
+
+func (b *featureGateBuilder) reportProblemsToJiraComponent(owningJiraComponent string) *featureGateBuilder {
+ b.owningJiraComponent = owningJiraComponent
+ return b
+}
+
+func (b *featureGateBuilder) contactPerson(responsiblePerson string) *featureGateBuilder {
+ b.responsiblePerson = responsiblePerson
+ return b
+}
+
+func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureGateBuilder {
+ b.owningProduct = owningProduct
+ return b
+}
+
+func (b *featureGateBuilder) enableIn(featureSets ...configv1.FeatureSet) *featureGateBuilder {
+ for clusterProfile := range b.statusByClusterProfileByFeatureSet {
+ for _, featureSet := range featureSets {
+ b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true
+ }
+ }
+ return b
+}
+
+func (b *featureGateBuilder) enableForClusterProfile(clusterProfile ClusterProfileName, featureSets ...configv1.FeatureSet) *featureGateBuilder {
+ for _, featureSet := range featureSets {
+ b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true
+ }
+ return b
+}
+
+func (b *featureGateBuilder) register() (configv1.FeatureGateName, error) {
+ if len(b.name) == 0 {
+ return "", fmt.Errorf("missing name")
+ }
+ if len(b.owningJiraComponent) == 0 {
+ return "", fmt.Errorf("missing owningJiraComponent")
+ }
+ if len(b.responsiblePerson) == 0 {
+ return "", fmt.Errorf("missing responsiblePerson")
+ }
+ if len(b.owningProduct) == 0 {
+ return "", fmt.Errorf("missing owningProduct")
+ }
+
+ featureGateName := configv1.FeatureGateName(b.name)
+ description := FeatureGateDescription{
+ FeatureGateAttributes: configv1.FeatureGateAttributes{
+ Name: featureGateName,
+ },
+ OwningJiraComponent: b.owningJiraComponent,
+ ResponsiblePerson: b.responsiblePerson,
+ OwningProduct: b.owningProduct,
+ }
+
+ // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination
+ for clusterProfile, byFeatureSet := range b.statusByClusterProfileByFeatureSet {
+ for featureSet, enabled := range byFeatureSet {
+ if _, ok := allFeatureGates[clusterProfile]; !ok {
+ allFeatureGates[clusterProfile] = map[configv1.FeatureSet]*FeatureGateEnabledDisabled{}
+ }
+ if _, ok := allFeatureGates[clusterProfile][featureSet]; !ok {
+ allFeatureGates[clusterProfile][featureSet] = &FeatureGateEnabledDisabled{}
+ }
+
+ if enabled {
+ allFeatureGates[clusterProfile][featureSet].Enabled = append(allFeatureGates[clusterProfile][featureSet].Enabled, description)
+ } else {
+ allFeatureGates[clusterProfile][featureSet].Disabled = append(allFeatureGates[clusterProfile][featureSet].Disabled, description)
+ }
+ }
+ }
+
+ return featureGateName, nil
+}
+
+func (b *featureGateBuilder) mustRegister() configv1.FeatureGateName {
+ ret, err := b.register()
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = make([]FeatureGateDescription, len(*in))
+ copy(*out, *in)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]FeatureGateDescription, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
+func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateEnabledDisabled)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) {
+ *out = *in
+ out.FeatureGateAttributes = in.FeatureGateAttributes
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription.
+func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateDescription)
+ in.DeepCopyInto(out)
+ return out
+}
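
util.go now carries the builder that feature_gates.go previously embedded. A hypothetical sketch of registering one more gate with it, only usable from inside the features package because the builder methods are unexported; the gate name, Jira component, and contact person below are placeholders, not real registrations:

```go
package features

import configv1 "github.com/openshift/api/config/v1"

// Gates start disabled in every FeatureSet for every cluster profile and are
// opted in per feature set via enableIn (or per profile via
// enableForClusterProfile); mustRegister panics if any required metadata is
// missing.
var FeatureGateExampleHypothetical = newFeatureGate("ExampleHypothetical").
	reportProblemsToJiraComponent("some-jira-component").
	contactPerson("someone").
	productScope(ocpSpecific).
	enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
	mustRegister()
```

enableForClusterProfile(Hypershift, ...) is the per-profile variant of the same call, used above for ExternalOIDC when a gate's defaults differ between Hypershift and self-managed clusters.
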
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go
index c18677a9c8..82ae150c82 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go
@@ -16,9 +16,8 @@ import (
// +kubebuilder:metadata:labels=openshift.io/operator-managed=
// +kubebuilder:printcolumn:name="Prepared",type="string",JSONPath=.status.conditions[?(@.type=="Prepared")].status
// +kubebuilder:printcolumn:name="Building",type="string",JSONPath=.status.conditions[?(@.type=="Building")].status
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=.status.conditions[?(@.type=="Ready")].status
+// +kubebuilder:printcolumn:name="Succeeded",type="string",JSONPath=.status.conditions[?(@.type=="Succeeded")].status
// +kubebuilder:printcolumn:name="Interrupted",type="string",JSONPath=.status.conditions[?(@.type=="Interrupted")].status
-// +kubebuilder:printcolumn:name="Restarted",type="string",JSONPath=.status.conditions[?(@.type=="Restarted")].status
// +kubebuilder:printcolumn:name="Failed",type="string",JSONPath=.status.conditions[?(@.type=="Failed")].status
// MachineOSBuild describes a build process managed and deployed by the MCO
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
index 51cbc46017..d35b0d2a7e 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
index f274fba11a..0360af3a04 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
index ffd58ed34e..bb45b00475 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml
@@ -26,15 +26,12 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
index 1b07959f5f..68c8828e55 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -92,15 +92,12 @@ machineosbuilds.machineconfiguration.openshift.io:
- jsonPath: .status.conditions[?(@.type=="Building")].status
name: Building
type: string
- - jsonPath: .status.conditions[?(@.type=="Ready")].status
- name: Ready
+ - jsonPath: .status.conditions[?(@.type=="Succeeded")].status
+ name: Succeeded
type: string
- jsonPath: .status.conditions[?(@.type=="Interrupted")].status
name: Interrupted
type: string
- - jsonPath: .status.conditions[?(@.type=="Restarted")].status
- name: Restarted
- type: string
- jsonPath: .status.conditions[?(@.type=="Failed")].status
name: Failed
type: string
diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
index 5afc154dcd..8bc06a4ec2 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
@@ -351,7 +351,7 @@ type NodeDisruptionPolicyStatusSSHKey struct {
// +union
type NodeDisruptionPolicySpecAction struct {
// type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed
- // Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special
+ // Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None.
// reload/restart requires a corresponding service target specified in the reload/restart field.
// Other values require no further configuration
// +unionDiscriminator
@@ -370,7 +370,7 @@ type NodeDisruptionPolicySpecAction struct {
// +union
type NodeDisruptionPolicyStatusAction struct {
// type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed
- // Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special
+ // Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special.
// reload/restart requires a corresponding service target specified in the reload/restart field.
// Other values require no further configuration
// +unionDiscriminator
diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go
index 970be707e7..b810e7859a 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_network.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_network.go
@@ -135,7 +135,7 @@ const (
)
// NetworkMigration represents the cluster network configuration.
-// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == '' || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration"
type NetworkMigration struct {
// networkType is the target type of network migration. Set this to the
// target network type to allow changing the default network. If unset, the
@@ -450,8 +450,8 @@ type IPv4OVNKubernetesConfig struct {
// The value must be in proper IPV4 CIDR format
// +kubebuilder:validation:MaxLength=18
// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
- // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255"
- // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 30 && x >= 0)",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
// +optional
InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
// internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the
@@ -464,8 +464,8 @@ type IPv4OVNKubernetesConfig struct {
// The value must be in proper IPV4 CIDR format
// +kubebuilder:validation:MaxLength=18
// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
- // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255"
- // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 30 && x >= 0)",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
// +optional
InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
}
@@ -484,10 +484,8 @@ type IPv6OVNKubernetesConfig struct {
// The value must be in proper IPV6 CIDR format
// Note that IPV6 dual addresses are not permitted
// +kubebuilder:validation:MaxLength=48
- // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
- // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive"
- // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments"
- // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
// +optional
InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
// internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the
@@ -501,9 +499,7 @@ type IPv6OVNKubernetesConfig struct {
// Note that IPV6 dual addresses are not permitted
// +kubebuilder:validation:MaxLength=48
// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
- // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive"
- // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments"
- // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
// +optional
InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
}
@@ -581,11 +577,9 @@ type IPv4GatewayConfig struct {
// The current default subnet is 169.254.169.0/29
// The value must be in proper IPV4 CIDR format
// +kubebuilder:validation:MaxLength=18
- // +kubebuilder:validation:XValidation:rule="self.indexOf('/') == self.lastIndexOf('/')",message="CIDR format must contain exactly one '/'"
- // +kubebuilder:validation:XValidation:rule="[int(self.split('/')[1])].all(x, x <= 29 && x >= 0)",message="subnet must be in the range /0 to /29 inclusive"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split('.').size() == 4",message="a valid IPv4 address must contain 4 octets"
- // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[0]].all(x, x != '0' && int(x) <= 255 && !x.startsWith('0'))",message="first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255"
- // +kubebuilder:validation:XValidation:rule="[self.findAll('[0-9]+')[1], self.findAll('[0-9]+')[2], self.findAll('[0-9]+')[3]].all(x, int(x) <= 255 && (x == '0' || !x.startsWith('0')))",message="IP address octets must not contain leading zeros, and must be less or equal to 255"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 29",message="subnet must be in the range /0 to /29 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
// +optional
InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
}
@@ -601,19 +595,8 @@ type IPv6GatewayConfig struct {
// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
// The current default subnet is fd69::/125
// Note that IPV6 dual addresses are not permitted
- // +kubebuilder:validation:XValidation:rule="self.indexOf('/') == self.lastIndexOf('/')",message="CIDR format must contain exactly one '/'"
- // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive"
- // +kubebuilder:validation:XValidation:rule="self.indexOf('::') == self.lastIndexOf('::')",message="IPv6 addresses must contain at most one '::' and may only be shortened once"
- // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=1 ? [self.split('/')[0].split(':', 8)[0]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=2 ? [self.split('/')[0].split(':', 8)[1]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=3 ? [self.split('/')[0].split(':', 8)[2]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=4 ? [self.split('/')[0].split(':', 8)[3]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=5 ? [self.split('/')[0].split(':', 8)[4]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=6 ? [self.split('/')[0].split(':', 8)[5]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=7 ? [self.split('/')[0].split(':', 8)[6]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7"
- // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=8 ? [self.split('/')[0].split(':', 8)[7]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8"
- // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
// +optional
InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
}
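
The rewritten markers above replace hand-rolled string splitting with the CEL isCIDR/cidr helpers. As a rough Go analogue (an assumption for illustration, not code from this patch), the IPv4 internal-subnet rules reduce to: the value parses as an IPv4 CIDR, the prefix length is at most /30, and the first octet is not 0; the IPv6 variants have the same shape with family 6 and a /125 bound.

```go
package main

import (
	"fmt"
	"net/netip"
)

// validIPv4InternalSubnet mirrors the simplified CEL rules for the v4
// internal subnets using the standard library net/netip package.
func validIPv4InternalSubnet(s string) error {
	prefix, err := netip.ParsePrefix(s)
	if err != nil || !prefix.Addr().Is4() {
		return fmt.Errorf("%q is not a valid IPv4 CIDR", s)
	}
	if prefix.Bits() > 30 {
		return fmt.Errorf("subnet must be in the range /0 to /30 inclusive")
	}
	if prefix.Addr().As4()[0] == 0 {
		return fmt.Errorf("first IP address octet must not be 0")
	}
	return nil
}

func main() {
	for _, s := range []string{"100.64.0.0/16", "0.64.0.0/16", "10.0.0.0/31", "not-a-cidr"} {
		fmt.Println(s, "->", validIPv4InternalSubnet(s))
	}
}
```
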
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml
index 5cbe491c57..a720852455 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml
@@ -278,23 +278,15 @@ spec:
maxLength: 18
type: string
x-kubernetes-validations:
- - message: CIDR format must contain exactly one '/'
- rule: self.indexOf('/') == self.lastIndexOf('/')
+ - message: Subnet must be in valid IPV4 CIDR format
+ rule: isCIDR(self) && cidr(self).ip().family() ==
+ 4
- message: subnet must be in the range /0 to /29 inclusive
- rule: '[int(self.split(''/'')[1])].all(x, x <= 29
- && x >= 0)'
- - message: a valid IPv4 address must contain 4 octets
- rule: self.split('/')[0].split('.').size() == 4
- - message: first IP address octet must not contain
- leading zeros, must be greater than 0 and less
- or equal to 255
- rule: '[self.findAll(''[0-9]+'')[0]].all(x, x !=
- ''0'' && int(x) <= 255 && !x.startsWith(''0''))'
- - message: IP address octets must not contain leading
- zeros, and must be less or equal to 255
- rule: '[self.findAll(''[0-9]+'')[1], self.findAll(''[0-9]+'')[2],
- self.findAll(''[0-9]+'')[3]].all(x, int(x) <=
- 255 && (x == ''0'' || !x.startsWith(''0'')))'
+ rule: isCIDR(self) && cidr(self).prefixLength()
+ <= 29
+ - message: first IP address octet must not be 0
+ rule: isCIDR(self) && int(self.split('.')[0]) >
+ 0
type: object
ipv6:
description: ipv6 allows users to configure IP settings
@@ -320,80 +312,13 @@ spec:
Note that IPV6 dual addresses are not permitted
type: string
x-kubernetes-validations:
- - message: CIDR format must contain exactly one '/'
- rule: self.indexOf('/') == self.lastIndexOf('/')
+ - message: Subnet must be in valid IPV6 CIDR format
+ rule: isCIDR(self) && cidr(self).ip().family() ==
+ 6
- message: subnet must be in the range /0 to /125
inclusive
- rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x,
- x <= 125 && x >= 0)
- - message: IPv6 addresses must contain at most one
- '::' and may only be shortened once
- rule: self.indexOf('::') == self.lastIndexOf('::')
- - message: a valid IPv6 address must contain 8 segments
- unless elided (::), in which case it must contain
- at most 6 non-empty segments
- rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size()
- <= 8 : self.split(''/'')[0].split('':'').size()
- == 8'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 1
- rule: 'self.split(''/'')[0].split('':'').size()
- >=1 ? [self.split(''/'')[0].split('':'', 8)[0]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 2
- rule: 'self.split(''/'')[0].split('':'').size()
- >=2 ? [self.split(''/'')[0].split('':'', 8)[1]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 3
- rule: 'self.split(''/'')[0].split('':'').size()
- >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 4
- rule: 'self.split(''/'')[0].split('':'').size()
- >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 5
- rule: 'self.split(''/'')[0].split('':'').size()
- >=5 ? [self.split(''/'')[0].split('':'', 8)[4]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 6
- rule: 'self.split(''/'')[0].split('':'').size()
- >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 7
- rule: 'self.split(''/'')[0].split('':'').size()
- >=7 ? [self.split(''/'')[0].split('':'', 8)[6]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: each segment of an IPv6 address must be
- a hexadecimal number between 0 and FFFF, failed
- on segment 8
- rule: 'self.split(''/'')[0].split('':'').size()
- >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x,
- x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$''))
- && size(x)<5 ) : true'
- - message: IPv6 dual addresses are not permitted,
- value should not contain `.` characters
- rule: '!self.contains(''.'')'
+ rule: isCIDR(self) && cidr(self).prefixLength()
+ <= 125
type: object
routingViaHost:
default: false
@@ -493,14 +418,10 @@ spec:
x-kubernetes-validations:
- message: Subnet must be in valid IPV4 CIDR format
rule: isCIDR(self) && cidr(self).ip().family() == 4
- - message: first IP address octet must not contain leading
- zeros, must be greater than 0 and less or equal to
- 255
- rule: '[self.findAll(''[0-9]+'')[0]].all(x, x != ''0''
- && int(x) <= 255 && !x.startsWith(''0''))'
- message: subnet must be in the range /0 to /30 inclusive
- rule: '[int(self.split(''/'')[1])].all(x, x <= 30 &&
- x >= 0)'
+ rule: isCIDR(self) && cidr(self).prefixLength() <= 30
+ - message: first IP address octet must not be 0
+ rule: isCIDR(self) && int(self.split('.')[0]) > 0
internalTransitSwitchSubnet:
description: internalTransitSwitchSubnet is a v4 subnet
in IPV4 CIDR format used internally by OVN-Kubernetes
@@ -521,14 +442,10 @@ spec:
x-kubernetes-validations:
- message: Subnet must be in valid IPV4 CIDR format
rule: isCIDR(self) && cidr(self).ip().family() == 4
- - message: first IP address octet must not contain leading
- zeros, must be greater than 0 and less or equal to
- 255
- rule: '[self.findAll(''[0-9]+'')[0]].all(x, x != ''0''
- && int(x) <= 255 && !x.startsWith(''0''))'
- message: subnet must be in the range /0 to /30 inclusive
- rule: '[int(self.split(''/'')[1])].all(x, x <= 30 &&
- x >= 0)'
+ rule: isCIDR(self) && cidr(self).prefixLength() <= 30
+ - message: first IP address octet must not be 0
+ rule: isCIDR(self) && int(self.split('.')[0]) > 0
type: object
ipv6:
description: ipv6 allows users to configure IP settings for
@@ -553,17 +470,7 @@ spec:
- message: Subnet must be in valid IPV6 CIDR format
rule: isCIDR(self) && cidr(self).ip().family() == 6
- message: subnet must be in the range /0 to /125 inclusive
- rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x,
- x <= 125 && x >= 0)
- - message: a valid IPv6 address must contain 8 segments
- unless elided (::), in which case it must contain
- at most 6 non-empty segments
- rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size()
- <= 8 : self.split(''/'')[0].split('':'').size() ==
- 8'
- - message: IPv6 dual addresses are not permitted, value
- should not contain `.` characters
- rule: '!self.contains(''.'')'
+ rule: isCIDR(self) && cidr(self).prefixLength() <= 125
internalTransitSwitchSubnet:
description: internalTransitSwitchSubnet is a v4 subnet
in IPV4 CIDR format used internally by OVN-Kubernetes
@@ -586,17 +493,7 @@ spec:
- message: Subnet must be in valid IPV6 CIDR format
rule: isCIDR(self) && cidr(self).ip().family() == 6
- message: subnet must be in the range /0 to /125 inclusive
- rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x,
- x <= 125 && x >= 0)
- - message: a valid IPv6 address must contain 8 segments
- unless elided (::), in which case it must contain
- at most 6 non-empty segments
- rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size()
- <= 8 : self.split(''/'')[0].split('':'').size() ==
- 8'
- - message: IPv6 dual addresses are not permitted, value
- should not contain `.` characters
- rule: '!self.contains(''.'')'
+ rule: isCIDR(self) && cidr(self).prefixLength() <= 125
type: object
mtu:
description: mtu is the MTU to use for the tunnel interface.
@@ -891,7 +788,7 @@ spec:
- message: networkType migration in mode other than 'Live' may not
be configured at the same time as mtu migration
rule: '!has(self.mtu) || !has(self.networkType) || self.networkType
- == '''' || has(self.mode) && self.mode == ''Live'''
+ == "" || has(self.mode) && self.mode == ''Live'''
observedConfig:
description: observedConfig holds a sparse config that controller
has observed from the cluster state. It exists in spec because
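The replacement rules above lean on the Kubernetes CEL IP/CIDR library (isCIDR, cidr().ip().family(), cidr().prefixLength()) instead of hand-rolled string checks. A minimal sketch of how such rules are typically declared at the Go API level via kubebuilder XValidation markers; the type and field names below are illustrative assumptions mirroring the CRD schema, not the actual openshift/api source:

package v1

// GatewayConfigIPv6 is an illustrative stand-in for the ipv6 internal subnet settings.
type GatewayConfigIPv6 struct {
	// internalJoinSubnet is a v6 subnet in IPv6 CIDR format used internally by OVN-Kubernetes.
	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
	// +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
	// +optional
	InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
}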
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
index 7acdf0d4b8..dbeb9e9293 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml
@@ -308,11 +308,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -448,11 +448,10 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
enum:
- Reboot
- Drain
@@ -581,11 +580,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -865,11 +864,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
@@ -1004,8 +1003,8 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
requires a corresponding service target specified
in the reload/restart field. Other values require
no further configuration
@@ -1140,11 +1139,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml
index d4200688e4..4f47e4631f 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml
@@ -308,11 +308,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -448,11 +448,10 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
enum:
- Reboot
- Drain
@@ -581,11 +580,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -865,11 +864,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
@@ -1004,8 +1003,8 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
requires a corresponding service target specified
in the reload/restart field. Other values require
no further configuration
@@ -1140,11 +1139,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
index 8bb4aa8715..bf7dab04ac 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml
@@ -308,11 +308,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -448,11 +448,10 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the reload/restart
+ field. Other values require no further configuration
enum:
- Reboot
- Drain
@@ -581,11 +580,11 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicySpecActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload and None. reload/restart requires
+ a corresponding service target specified in the
+ reload/restart field. Other values require no further
+ configuration
enum:
- Reboot
- Drain
@@ -865,11 +864,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
@@ -1004,8 +1003,8 @@ spec:
type:
description: type represents the commands that will
be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
+ is executed Valid values are Reboot, Drain, Reload,
+ Restart, DaemonReload, None and Special. reload/restart
requires a corresponding service target specified
in the reload/restart field. Other values require
no further configuration
@@ -1140,11 +1139,11 @@ spec:
type:
description: type represents the commands that
will be carried out if this NodeDisruptionPolicyStatusActionType
- is executed Valid value are Reboot, Drain, Reload,
- Restart, DaemonReload, None and Special reload/restart
- requires a corresponding service target specified
- in the reload/restart field. Other values require
- no further configuration
+ is executed Valid values are Reboot, Drain,
+ Reload, Restart, DaemonReload, None and Special.
+ reload/restart requires a corresponding service
+ target specified in the reload/restart field.
+ Other values require no further configuration
enum:
- Reboot
- Drain
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
index 95017ec934..81d4ce91e5 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
@@ -1346,7 +1346,7 @@ func (NodeDisruptionPolicyConfig) SwaggerDoc() map[string]string {
}
var map_NodeDisruptionPolicySpecAction = map[string]string{
- "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
+ "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
"reload": "reload specifies the service to reload, only valid if type is reload",
"restart": "restart specifies the service to restart, only valid if type is restart",
}
@@ -1393,7 +1393,7 @@ func (NodeDisruptionPolicyStatus) SwaggerDoc() map[string]string {
}
var map_NodeDisruptionPolicyStatusAction = map[string]string{
- "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed Valid value are Reboot, Drain, Reload, Restart, DaemonReload, None and Special reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
+ "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
"reload": "reload specifies the service to reload, only valid if type is reload",
"restart": "restart specifies the service to restart, only valid if type is restart",
}
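The corrected descriptions above spell out that Reload and Restart actions need an accompanying service target, while the other action types need no further configuration. A rough illustration of the shape the schema describes; the types and field names below are local stand-ins mirroring the CRD fields (type, reload, restart), not the real openshift/api Go types:

package main

import "fmt"

// RestartService is a local stand-in for the CRD's restart target.
type RestartService struct {
	ServiceName string // e.g. "crio.service"
}

// NodeDisruptionAction is a local stand-in mirroring the CRD's action schema.
type NodeDisruptionAction struct {
	Type    string          // one of Reboot, Drain, Reload, Restart, DaemonReload, None
	Restart *RestartService // required only when Type is "Restart"
}

func main() {
	// A Restart action must name the unit to restart; other types need no extra config.
	action := NodeDisruptionAction{
		Type:    "Restart",
		Restart: &RestartService{ServiceName: "crio.service"},
	}
	fmt.Printf("action %s -> %s\n", action.Type, action.Restart.ServiceName)
}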
diff --git a/vendor/github.com/openshift/api/payload-command/render/config.go b/vendor/github.com/openshift/api/payload-command/render/config.go
new file mode 100644
index 0000000000..d98b39b45c
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/config.go
@@ -0,0 +1,47 @@
+package render
+
+import (
+ "encoding/json"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ configScheme = runtime.NewScheme()
+ configCodecs = serializer.NewCodecFactory(configScheme)
+)
+
+func init() {
+ utilruntime.Must(configv1.AddToScheme(configScheme))
+}
+
+func readFeatureGateV1OrDie(objBytes []byte) *configv1.FeatureGate {
+ requiredObj, err := runtime.Decode(configCodecs.UniversalDecoder(configv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+
+ return requiredObj.(*configv1.FeatureGate)
+}
+
+func writeFeatureGateV1OrDie(obj *configv1.FeatureGate) string {
+ asMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+ if err != nil {
+ panic(err)
+ }
+ if _, ok := asMap["apiVersion"]; !ok {
+ asMap["apiVersion"] = configv1.GroupVersion.Identifier()
+ }
+ if _, ok := asMap["kind"]; !ok {
+ asMap["kind"] = "FeatureGate"
+ }
+
+ ret, err := json.MarshalIndent(asMap, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ return string(ret) + "\n"
+}
diff --git a/vendor/github.com/openshift/api/payload-command/render/render.go b/vendor/github.com/openshift/api/payload-command/render/render.go
new file mode 100644
index 0000000000..3fa9c4bc6a
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/render.go
@@ -0,0 +1,329 @@
+package render
+
+import (
+ "flag"
+ "fmt"
+ "github.com/openshift/api/features"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ configv1 "github.com/openshift/api/config/v1"
+ assets "github.com/openshift/api/payload-command/render/renderassets"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// RenderOpts holds values to drive the render command.
+type RenderOpts struct {
+ ImageProvidedManifestDir string
+ RenderedManifestInputFilename string
+ PayloadVersion string
+ AssetOutputDir string
+ UnprefixedClusterProfile string
+}
+
+func (o *RenderOpts) AddFlags(fs *flag.FlagSet) {
+ fs.StringVar(&o.RenderedManifestInputFilename, "rendered-manifest-dir", o.RenderedManifestInputFilename,
+ "files or directories containing yaml or json manifests that will be created via cluster-bootstrapping.")
+ fs.StringVar(&o.ImageProvidedManifestDir, "image-manifests", o.ImageProvidedManifestDir, "Directory containing the manifest templates provided by the image.")
+ fs.StringVar(&o.PayloadVersion, "payload-version", o.PayloadVersion, "Version that will eventually be placed into ClusterOperator.status. This normally comes from the CVO set via env var: OPERATOR_IMAGE_VERSION.")
+ fs.StringVar(&o.AssetOutputDir, "asset-output-dir", o.AssetOutputDir, "Output path for rendered manifests.")
+ fs.StringVar(&o.UnprefixedClusterProfile, "cluster-profile", o.UnprefixedClusterProfile, "self-managed-high-availability, single-node-developer, ibm-cloud-managed")
+}
+
+// Validate verifies the inputs.
+func (o *RenderOpts) Validate() error {
+ switch o.UnprefixedClusterProfile {
+ case "":
+ // to be disallowed soonish
+ case "self-managed-high-availability", "ibm-cloud-managed":
+ // ok
+ default:
+ return fmt.Errorf("--cluster-profile must be one of self-managed-high-availability, single-node-developer, ibm-cloud-managed")
+ }
+
+ return nil
+}
+
+// Complete fills in missing values before command execution.
+func (o *RenderOpts) Complete() error {
+ // TODO cluster-config-operator improperly assumes all single node clusters are this single-node-developer. apparently single node is something different.
+ // TODO once cluster-config-operator is fixed, this line can be removed, but big rocks first.
+ if o.UnprefixedClusterProfile == "single-node-developer" {
+ o.UnprefixedClusterProfile = "self-managed-high-availability"
+ }
+ return nil
+}
+
+// Run contains the logic of the render command.
+func (o *RenderOpts) Run() error {
+ featureSet := ""
+ featureGateFiles, err := featureGateManifests([]string{o.RenderedManifestInputFilename})
+ if err != nil {
+ return fmt.Errorf("problem with featuregate manifests: %w", err)
+ }
+ clusterProfileAnnotationName := fmt.Sprintf("include.release.openshift.io/%s", o.UnprefixedClusterProfile)
+
+ for _, featureGateFile := range featureGateFiles {
+ uncastObj, err := featureGateFile.GetDecodedObj()
+ if err != nil {
+ return fmt.Errorf("error decoding FeatureGate: %w", err)
+ }
+ featureGates := &configv1.FeatureGate{}
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(uncastObj.(*unstructured.Unstructured).Object, featureGates)
+ if err != nil {
+ return fmt.Errorf("error converting FeatureGate: %w", err)
+ }
+ if featureGates.Annotations == nil {
+ featureGates.Annotations = map[string]string{}
+ }
+
+ if featureGates.Spec.FeatureSet == configv1.CustomNoUpgrade {
+ featureSet = string(featureGates.Spec.FeatureSet)
+ renderedFeatureGates, err := renderCustomNoUpgradeFeatureGate(featureGates, features.ClusterProfileName(clusterProfileAnnotationName), o.PayloadVersion)
+ if err != nil {
+ return err
+ }
+ featureGateOutBytes := writeFeatureGateV1OrDie(renderedFeatureGates)
+ if err := os.WriteFile(featureGateFile.OriginalFilename, []byte(featureGateOutBytes), 0644); err != nil {
+ return fmt.Errorf("error writing FeatureGate manifest: %w", err)
+ }
+ continue
+ }
+
+ // if the manifest has cluster profiles specified, the manifest's list must include the configured clusterprofile.
+ manifestClusterProfiles := clusterProfilesFrom(featureGates.Annotations)
+ switch {
+ case len(manifestClusterProfiles) > 0 && !manifestClusterProfiles.Has(clusterProfileAnnotationName):
+ return fmt.Errorf("manifest has cluster-profile preferences (%v) that do not contain the configured clusterProfile: %q",
+ manifestClusterProfiles.UnsortedList(), clusterProfileAnnotationName)
+ case len(manifestClusterProfiles) == 0 && len(clusterProfileAnnotationName) != 0:
+ featureGates.Annotations[clusterProfileAnnotationName] = "true"
+ }
+
+ featureGateStatus, err := features.FeatureSets(features.ClusterProfileName(clusterProfileAnnotationName), featureGates.Spec.FeatureSet)
+ if err != nil {
+ return fmt.Errorf("unable to resolve featureGateStatus: %w", err)
+ }
+ currentDetails := FeaturesGateDetailsFromFeatureSets(featureGateStatus, o.PayloadVersion)
+ featureGates.Status.FeatureGates = []configv1.FeatureGateDetails{*currentDetails}
+
+ featureGateOutBytes := writeFeatureGateV1OrDie(featureGates)
+ if err := os.WriteFile(featureGateFile.OriginalFilename, []byte(featureGateOutBytes), 0644); err != nil {
+ return fmt.Errorf("error writing FeatureGate manifest: %w", err)
+ }
+ featureSet = string(featureGates.Spec.FeatureSet)
+ }
+
+ err = assets.SubstituteAndCopyFiles(
+ o.ImageProvidedManifestDir,
+ filepath.Join(o.AssetOutputDir, "manifests"),
+ featureSet,
+ o.UnprefixedClusterProfile,
+ nil,
+ )
+ if err != nil {
+ return fmt.Errorf("failed to substitute and copy files: %w", err)
+ }
+
+ return nil
+}
+
+func renderCustomNoUpgradeFeatureGate(in *configv1.FeatureGate, clusterProfile features.ClusterProfileName, payloadVersion string) (*configv1.FeatureGate, error) {
+ if in.Spec.FeatureSet != configv1.CustomNoUpgrade {
+ return nil, fmt.Errorf("not CustomNoUpgrade")
+ }
+ for _, forceEnabled := range in.Spec.CustomNoUpgrade.Enabled {
+ if inListOfNames(in.Spec.CustomNoUpgrade.Disabled, forceEnabled) {
+ return nil, fmt.Errorf("trying to enable and disable %q", forceEnabled)
+ }
+ }
+
+ ret := in.DeepCopy()
+
+ // if possible, set the payload version to ease usage during install of different versions
+ switch {
+ case len(in.Status.FeatureGates) > 1:
+ return in, nil
+ case len(in.Status.FeatureGates) == 1 && len(in.Status.FeatureGates[0].Version) != 0:
+ return in, nil
+
+ case len(in.Status.FeatureGates) == 1 && len(in.Status.FeatureGates[0].Version) == 0:
+ ret.Status.FeatureGates[0].Version = payloadVersion
+ case len(in.Status.FeatureGates) == 0:
+ ret.Status.FeatureGates = append(ret.Status.FeatureGates, configv1.FeatureGateDetails{
+ Version: payloadVersion,
+ Enabled: []configv1.FeatureGateAttributes{},
+ Disabled: []configv1.FeatureGateAttributes{},
+ })
+ }
+
+ defaultFeatureGates, err := features.FeatureSets(clusterProfile, configv1.Default)
+ if err != nil {
+ return nil, err
+ }
+
+ enabled := []configv1.FeatureGateAttributes{}
+ disabled := []configv1.FeatureGateAttributes{}
+ if in.Spec.CustomNoUpgrade != nil {
+ enabled = []configv1.FeatureGateAttributes{}
+ for _, forceEnabled := range in.Spec.CustomNoUpgrade.Enabled {
+ enabled = append(enabled, configv1.FeatureGateAttributes{Name: forceEnabled})
+ }
+ for _, defaultEnabled := range defaultFeatureGates.Enabled {
+ if !inListOfNames(in.Spec.CustomNoUpgrade.Disabled, defaultEnabled.FeatureGateAttributes.Name) {
+ enabled = append(enabled, defaultEnabled.FeatureGateAttributes)
+ }
+ }
+
+ disabled = []configv1.FeatureGateAttributes{}
+ for _, forceDisabled := range in.Spec.CustomNoUpgrade.Disabled {
+ disabled = append(disabled, configv1.FeatureGateAttributes{Name: forceDisabled})
+ }
+ for _, defaultDisabled := range defaultFeatureGates.Disabled {
+ if !inListOfNames(in.Spec.CustomNoUpgrade.Enabled, defaultDisabled.FeatureGateAttributes.Name) {
+ disabled = append(disabled, defaultDisabled.FeatureGateAttributes)
+ }
+ }
+ } else {
+ for _, defaultEnabled := range defaultFeatureGates.Enabled {
+ enabled = append(enabled, defaultEnabled.FeatureGateAttributes)
+ }
+ for _, defaultDisabled := range defaultFeatureGates.Disabled {
+ disabled = append(disabled, defaultDisabled.FeatureGateAttributes)
+ }
+ }
+
+ // sort for stability
+ sort.Sort(byName(enabled))
+ sort.Sort(byName(disabled))
+ ret.Status.FeatureGates[0].Enabled = enabled
+ ret.Status.FeatureGates[0].Disabled = disabled
+
+ return ret, nil
+}
+
+func inListOfNames(haystack []configv1.FeatureGateName, needle configv1.FeatureGateName) bool {
+ for _, curr := range haystack {
+ if curr == needle {
+ return true
+ }
+ }
+ return false
+}
+
+func clusterProfilesFrom(annotations map[string]string) sets.Set[string] {
+ ret := sets.New[string]()
+ for k, v := range annotations {
+ if strings.HasPrefix(k, "include.release.openshift.io/") && v == "true" {
+ ret.Insert(k)
+ }
+ }
+ return ret
+}
+
+func featureGateManifests(renderedManifestInputFilenames []string) (assets.RenderedManifests, error) {
+ if len(renderedManifestInputFilenames) == 0 {
+ return nil, fmt.Errorf("cannot return FeatureGate without rendered manifests")
+ }
+
+ inputManifests := assets.RenderedManifests{}
+ for _, filename := range renderedManifestInputFilenames {
+ manifestContent, err := assets.LoadFilesRecursively(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed loading rendered manifest inputs from %q: %w", filename, err)
+ }
+ for manifestFile, content := range manifestContent {
+ inputManifests = append(inputManifests, assets.RenderedManifest{
+ OriginalFilename: filepath.Join(filename, manifestFile),
+ Content: content,
+ })
+ }
+ }
+ featureGates := inputManifests.ListManifestOfType(configv1.GroupVersion.WithKind("FeatureGate"))
+ if len(featureGates) == 0 {
+ return nil, fmt.Errorf("no FeatureGates found in manfest dir: %v", renderedManifestInputFilenames)
+ }
+
+ return featureGates, nil
+}
+
+func FeaturesGateDetailsFromFeatureSets(featureGateStatus *features.FeatureGateEnabledDisabled, currentVersion string) *configv1.FeatureGateDetails {
+ currentDetails := configv1.FeatureGateDetails{
+ Version: currentVersion,
+ }
+ for _, gateName := range featureGateStatus.Enabled {
+ currentDetails.Enabled = append(currentDetails.Enabled, *gateName.FeatureGateAttributes.DeepCopy())
+ }
+ for _, gateName := range featureGateStatus.Disabled {
+ currentDetails.Disabled = append(currentDetails.Disabled, *gateName.FeatureGateAttributes.DeepCopy())
+ }
+
+ // sort for stability
+ sort.Sort(byName(currentDetails.Enabled))
+ sort.Sort(byName(currentDetails.Disabled))
+
+ return &currentDetails
+}
+
+type byName []configv1.FeatureGateAttributes
+
+func (a byName) Len() int { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool {
+ if strings.Compare(string(a[i].Name), string(a[j].Name)) < 0 {
+ return true
+ }
+ return false
+}
+
+func featuresGatesFromFeatureSets(knownFeatureSets map[configv1.FeatureSet]*features.FeatureGateEnabledDisabled, featureGates *configv1.FeatureGate) ([]configv1.FeatureGateName, []configv1.FeatureGateName, error) {
+ if featureGates.Spec.FeatureSet == configv1.CustomNoUpgrade {
+ if featureGates.Spec.FeatureGateSelection.CustomNoUpgrade != nil {
+ completeEnabled, completeDisabled := completeFeatureGates(knownFeatureSets, featureGates.Spec.FeatureGateSelection.CustomNoUpgrade.Enabled, featureGates.Spec.FeatureGateSelection.CustomNoUpgrade.Disabled)
+ return completeEnabled, completeDisabled, nil
+ }
+ return []configv1.FeatureGateName{}, []configv1.FeatureGateName{}, nil
+ }
+
+ featureSet, ok := knownFeatureSets[featureGates.Spec.FeatureSet]
+ if !ok {
+ return []configv1.FeatureGateName{}, []configv1.FeatureGateName{}, fmt.Errorf(".spec.featureSet %q not found", featureSet)
+ }
+
+ completeEnabled, completeDisabled := completeFeatureGates(knownFeatureSets, toFeatureGateNames(featureSet.Enabled), toFeatureGateNames(featureSet.Disabled))
+ return completeEnabled, completeDisabled, nil
+}
+
+func toFeatureGateNames(in []features.FeatureGateDescription) []configv1.FeatureGateName {
+ out := []configv1.FeatureGateName{}
+ for _, curr := range in {
+ out = append(out, curr.FeatureGateAttributes.Name)
+ }
+
+ return out
+}
+
+// completeFeatureGates identifies every known feature and ensures that is explicitly on or explicitly off
+func completeFeatureGates(knownFeatureSets map[configv1.FeatureSet]*features.FeatureGateEnabledDisabled, enabled, disabled []configv1.FeatureGateName) ([]configv1.FeatureGateName, []configv1.FeatureGateName) {
+ specificallyEnabledFeatureGates := sets.New[configv1.FeatureGateName]()
+ specificallyEnabledFeatureGates.Insert(enabled...)
+
+ knownFeatureGates := sets.New[configv1.FeatureGateName]()
+ knownFeatureGates.Insert(enabled...)
+ knownFeatureGates.Insert(disabled...)
+ for _, known := range knownFeatureSets {
+ for _, curr := range known.Disabled {
+ knownFeatureGates.Insert(curr.FeatureGateAttributes.Name)
+ }
+ for _, curr := range known.Enabled {
+ knownFeatureGates.Insert(curr.FeatureGateAttributes.Name)
+ }
+ }
+
+ return enabled, knownFeatureGates.Difference(specificallyEnabledFeatureGates).UnsortedList()
+}
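The new render.go exposes RenderOpts with the usual AddFlags/Validate/Complete/Run lifecycle. A minimal sketch of how a command could drive it; the flag-set name and klog error handling are illustrative choices, not taken from this repository:

package main

import (
	"flag"
	"os"

	"github.com/openshift/api/payload-command/render"
	"k8s.io/klog/v2"
)

func main() {
	opts := &render.RenderOpts{}
	fs := flag.NewFlagSet("render", flag.ExitOnError)
	opts.AddFlags(fs)
	// Flag values come from the command line; ExitOnError aborts on bad input.
	fs.Parse(os.Args[1:])

	if err := opts.Validate(); err != nil {
		klog.Fatal(err)
	}
	if err := opts.Complete(); err != nil {
		klog.Fatal(err)
	}
	if err := opts.Run(); err != nil {
		klog.Fatal(err)
	}
}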
diff --git a/vendor/github.com/openshift/api/payload-command/render/renderassets/assets.go b/vendor/github.com/openshift/api/payload-command/render/renderassets/assets.go
new file mode 100644
index 0000000000..0ec0802212
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/renderassets/assets.go
@@ -0,0 +1,222 @@
+package assets
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/util/errors"
+)
+
+type Permission os.FileMode
+
+const (
+ PermissionDirectoryDefault Permission = 0755
+ PermissionFileDefault Permission = 0644
+ PermissionFileRestricted Permission = 0600
+)
+
+// Asset defines a single static asset.
+type Asset struct {
+ Name string
+ FilePermission Permission
+ Data []byte
+}
+
+// Assets is a list of assets.
+type Assets []Asset
+
+// New walks through a directory recursively and renders each file as an asset. Only files
+// for which all predicates return true are rendered.
+func New(dir string, data interface{}, manifestPredicates []FileContentsPredicate, predicates ...FileInfoPredicate) (Assets, error) {
+ files, err := LoadFilesRecursively(dir, predicates...)
+ if err != nil {
+ return nil, err
+ }
+
+ var as Assets
+ var errs []error
+ for path, bs := range files {
+ a, err := assetFromTemplate(path, bs, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to render %q: %v", path, err)
+ }
+
+ skipManifest := false
+ for _, manifestPredicate := range manifestPredicates {
+ shouldInclude, err := manifestPredicate(a.Data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to check manifest filter %q: %v", path, err)
+ }
+ if !shouldInclude {
+ skipManifest = true
+ break
+ }
+ }
+ if skipManifest {
+ continue
+ }
+
+ as = append(as, *a)
+ }
+
+ if len(errs) > 0 {
+ return nil, errors.NewAggregate(errs)
+ }
+
+ return as, nil
+}
+
+// WriteFiles writes the assets to specified path.
+func (as Assets) WriteFiles(path string) error {
+ if err := os.MkdirAll(path, os.FileMode(PermissionDirectoryDefault)); err != nil {
+ return err
+ }
+ for _, asset := range as {
+ if _, err := os.Stat(path); os.IsExist(err) {
+ fmt.Printf("WARNING: File %s already exists, content will be replaced\n", path)
+ }
+ if err := asset.WriteFile(path); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WriteFile writes a single asset into specified path.
+func (a Asset) WriteFile(path string) error {
+ f := filepath.Join(path, a.Name)
+ perms := PermissionFileDefault
+ if err := os.MkdirAll(filepath.Dir(f), os.FileMode(PermissionDirectoryDefault)); err != nil {
+ return err
+ }
+ if a.FilePermission != 0 {
+ perms = a.FilePermission
+ }
+ fmt.Printf("Writing asset: %s\n", f)
+ return ioutil.WriteFile(f, a.Data, os.FileMode(perms))
+}
+
+// MustCreateAssetFromTemplate processes the given template with the provided config and returns an asset.
+func MustCreateAssetFromTemplate(name string, template []byte, config interface{}) Asset {
+ asset, err := assetFromTemplate(name, template, config)
+ if err != nil {
+ panic(err)
+ }
+ return *asset
+}
+
+func assetFromTemplate(name string, tb []byte, data interface{}) (*Asset, error) {
+ bs, err := renderFile(name, tb, data)
+ if err != nil {
+ return nil, err
+ }
+ return &Asset{Name: name, Data: bs}, nil
+}
+
+type FileInfoPredicate func(path string, info os.FileInfo) (bool, error)
+
+type FileContentsPredicate func(manifest []byte) (bool, error)
+
+// OnlyYaml is a predicate for LoadFilesRecursively that filters out non-yaml files.
+func OnlyYaml(_ string, info os.FileInfo) (bool, error) {
+ return strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), ".yml"), nil
+}
+
+// InstallerFeatureSet returns a predicate for LoadFilesRecursively that filters manifests
+// based on the specified FeatureSet.
+func InstallerFeatureSet(featureSet string) FileContentsPredicate {
+ targetFeatureSet := "Default"
+ if len(featureSet) > 0 {
+ targetFeatureSet = featureSet
+ }
+ return func(manifest []byte) (bool, error) {
+ uncastObj, _, err := codecs.UniversalDecoder().Decode(manifest, nil, &unstructured.Unstructured{})
+ if err != nil {
+ panic(fmt.Errorf("unable to decode: %w", err))
+ }
+
+ manifestFeatureSets := uncastObj.(*unstructured.Unstructured).GetAnnotations()["release.openshift.io/feature-set"]
+ if len(manifestFeatureSets) == 0 {
+ return true, nil
+ }
+ for _, manifestFeatureSet := range strings.Split(manifestFeatureSets, ",") {
+ if manifestFeatureSet == targetFeatureSet {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+}
+
+// ClusterProfile returns a predicate for LoadFilesRecursively that filters manifests
+// based on the specified cluster profile.
+func ClusterProfile(clusterProfile string) FileContentsPredicate {
+ // be compatible with previous behavior
+ if len(clusterProfile) == 0 {
+ return func(manifest []byte) (bool, error) {
+ return true, nil
+ }
+ }
+
+ clusterProfileAnnotationName := fmt.Sprintf("include.release.openshift.io/%s", clusterProfile)
+ return func(manifest []byte) (bool, error) {
+ uncastObj, _, err := codecs.UniversalDecoder().Decode(manifest, nil, &unstructured.Unstructured{})
+ if err != nil {
+ panic(fmt.Errorf("unable to decode: %w", err))
+ }
+
+ isClusterProfileEnabled := uncastObj.(*unstructured.Unstructured).GetAnnotations()[clusterProfileAnnotationName]
+ if isClusterProfileEnabled == "true" {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// LoadFilesRecursively returns a map from relative path names to file content.
+func LoadFilesRecursively(dir string, predicates ...FileInfoPredicate) (map[string][]byte, error) {
+ files := map[string][]byte{}
+ err := filepath.Walk(dir,
+ func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+
+ for _, p := range predicates {
+ include, err := p(path, info)
+ if err != nil {
+ return err
+ }
+ if !include {
+ return nil
+ }
+ }
+
+ bs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ // make path relative to dir
+ rel, err := filepath.Rel(dir, path)
+ if err != nil {
+ return err
+ }
+
+ files[rel] = bs
+ return nil
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
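assets.go is essentially a generic manifest loader and renderer. A small usage sketch of the exported helpers; the directory path is a placeholder:

package main

import (
	"fmt"

	assets "github.com/openshift/api/payload-command/render/renderassets"
)

func main() {
	// Load every *.yaml/*.yml file under a directory; "./manifests" is a placeholder path.
	files, err := assets.LoadFilesRecursively("./manifests", assets.OnlyYaml)
	if err != nil {
		panic(err)
	}
	for name := range files {
		fmt.Println("loaded manifest:", name)
	}
}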
diff --git a/vendor/github.com/openshift/api/payload-command/render/renderassets/rendered_manifests.go b/vendor/github.com/openshift/api/payload-command/render/renderassets/rendered_manifests.go
new file mode 100644
index 0000000000..abd175587d
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/renderassets/rendered_manifests.go
@@ -0,0 +1,94 @@
+package assets
+
+import (
+ "fmt"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/klog/v2"
+)
+
+type RenderedManifests []RenderedManifest
+
+type RenderedManifest struct {
+ OriginalFilename string
+ Content []byte
+
+ // use GetDecodedObj to access
+ decodedObj runtime.Object
+}
+
+func (renderedManifests RenderedManifests) ListManifestOfType(gvk schema.GroupVersionKind) []RenderedManifest {
+ ret := []RenderedManifest{}
+ for i := range renderedManifests {
+ obj, err := renderedManifests[i].GetDecodedObj()
+ if err != nil {
+ klog.Warningf("failure to read %q: %v", renderedManifests[i].OriginalFilename, err)
+ continue
+ }
+ if obj.GetObjectKind().GroupVersionKind() == gvk {
+ ret = append(ret, renderedManifests[i])
+ }
+ }
+
+ return ret
+}
+
+func (renderedManifests RenderedManifests) GetManifest(gvk schema.GroupVersionKind, namespace, name string) (RenderedManifest, error) {
+ for i := range renderedManifests {
+ obj, err := renderedManifests[i].GetDecodedObj()
+ if err != nil {
+ klog.Warningf("failure to read %q: %v", renderedManifests[i].OriginalFilename, err)
+ continue
+ }
+ if obj.GetObjectKind().GroupVersionKind() != gvk {
+ continue
+ }
+ objMetadata, err := meta.Accessor(obj)
+ if err != nil {
+ klog.Warningf("failure to read metadata %q: %v", renderedManifests[i].OriginalFilename, err)
+ continue
+ }
+
+ // since validation requires that all of these are the same, it doesn't matter which one we return
+ if objMetadata.GetName() == name && objMetadata.GetNamespace() == namespace {
+ return renderedManifests[i], nil
+ }
+ }
+
+ return RenderedManifest{}, apierrors.NewNotFound(
+ schema.GroupResource{
+ Group: gvk.Group,
+ Resource: gvk.Kind,
+ },
+ name)
+}
+
+func (renderedManifests RenderedManifests) GetObject(gvk schema.GroupVersionKind, namespace, name string) (runtime.Object, error) {
+ manifest, err := renderedManifests.GetManifest(gvk, namespace, name)
+ if err != nil {
+ return nil, err
+ }
+ return manifest.GetDecodedObj()
+}
+
+var localScheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(localScheme)
+
+func (c *RenderedManifest) GetDecodedObj() (runtime.Object, error) {
+ if c.decodedObj != nil {
+ return c.decodedObj, nil
+ }
+
+ udi, _, err := codecs.UniversalDecoder().Decode(c.Content, nil, &unstructured.Unstructured{})
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode %q: %w", c.OriginalFilename, err)
+ }
+ c.decodedObj = udi
+
+ return c.decodedObj, nil
+}
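rendered_manifests.go lets callers pick typed objects out of a pile of rendered YAML. A short sketch reusing the FeatureGate lookup pattern from render.go; the file name and on-disk content are placeholders:

package main

import (
	"fmt"
	"os"

	configv1 "github.com/openshift/api/config/v1"
	assets "github.com/openshift/api/payload-command/render/renderassets"
)

func main() {
	raw, err := os.ReadFile("featuregate.yaml") // placeholder path
	if err != nil {
		panic(err)
	}
	manifests := assets.RenderedManifests{
		{OriginalFilename: "featuregate.yaml", Content: raw},
	}
	// FeatureGate is cluster-scoped, hence the empty namespace; "cluster" is the canonical name.
	fg, err := manifests.GetManifest(configv1.GroupVersion.WithKind("FeatureGate"), "", "cluster")
	if err != nil {
		panic(err)
	}
	fmt.Println("found FeatureGate in", fg.OriginalFilename)
}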
diff --git a/vendor/github.com/openshift/api/payload-command/render/renderassets/template.go b/vendor/github.com/openshift/api/payload-command/render/renderassets/template.go
new file mode 100644
index 0000000000..4208348181
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/renderassets/template.go
@@ -0,0 +1,55 @@
+package assets
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "text/template"
+)
+
+var templateFuncs = map[string]interface{}{
+ "base64": base64encode,
+ "indent": indent,
+ "load": load,
+}
+
+func AddTemplateFunc(name string, fn interface{}) error {
+ if _, ok := templateFuncs[name]; ok {
+ return fmt.Errorf("%q already registered as template func", name)
+ }
+ templateFuncs[name] = fn
+ return nil
+}
+
+func AddTemplateFuncOrDie(name string, fn interface{}) {
+ err := AddTemplateFunc(name, fn)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func indent(indention int, v []byte) string {
+ newline := "\n" + strings.Repeat(" ", indention)
+ return strings.Replace(string(v), "\n", newline, -1)
+}
+
+func base64encode(v []byte) string {
+ return base64.StdEncoding.EncodeToString(v)
+}
+
+func load(n string, assets map[string][]byte) []byte {
+ return assets[n]
+}
+
+func renderFile(name string, tb []byte, data interface{}) ([]byte, error) {
+ tmpl, err := template.New(name).Funcs(templateFuncs).Parse(string(tb))
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, data); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
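template.go registers a handful of helper funcs (base64, indent, load) for the manifest templates. A tiny sketch of how they surface through the exported MustCreateAssetFromTemplate helper; the template text and data are made up for illustration:

package main

import (
	"fmt"

	assets "github.com/openshift/api/payload-command/render/renderassets"
)

func main() {
	// "base64" is the template func registered in template.go; it expects []byte input.
	tmpl := []byte("secretData: {{ base64 .Token }}")
	data := map[string][]byte{"Token": []byte("placeholder-token")}

	asset := assets.MustCreateAssetFromTemplate("example-secret.yaml", tmpl, data)
	fmt.Println(string(asset.Data))
}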
diff --git a/vendor/github.com/openshift/api/payload-command/render/renderassets/write.go b/vendor/github.com/openshift/api/payload-command/render/renderassets/write.go
new file mode 100644
index 0000000000..98818a4ef8
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/renderassets/write.go
@@ -0,0 +1,31 @@
+package assets
+
+import (
+ "fmt"
+ "path/filepath"
+)
+
+// SubstituteAndCopyFiles reads files from the input dir, selects some by predicate, transforms them, and writes the results to the output dir.
+func SubstituteAndCopyFiles(assetInputDir, assetOutputDir, featureSet, clusterProfile string, templateData interface{}, additionalPredicates ...FileInfoPredicate) error {
+ defaultPredicates := []FileInfoPredicate{OnlyYaml}
+ manifestPredicates := []FileContentsPredicate{
+ InstallerFeatureSet(featureSet),
+ ClusterProfile(clusterProfile),
+ }
+
+ // write assets
+ manifests, err := New(
+ assetInputDir,
+ templateData,
+ manifestPredicates,
+ append(additionalPredicates, defaultPredicates...)...,
+ )
+ if err != nil {
+ return fmt.Errorf("failed rendering assets: %v", err)
+ }
+ if err := manifests.WriteFiles(filepath.Join(assetOutputDir)); err != nil {
+ return fmt.Errorf("failed writing assets to %q: %v", filepath.Join(assetOutputDir), err)
+ }
+
+ return nil
+}
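write.go ties the predicates together: filter by feature set and cluster profile, render the templates, and write the results. A minimal call sketch; the directory names and values below are placeholders:

package main

import (
	assets "github.com/openshift/api/payload-command/render/renderassets"
	"k8s.io/klog/v2"
)

func main() {
	// Copy YAML manifests that match the given feature set and cluster profile.
	// All arguments below are placeholders chosen for illustration.
	err := assets.SubstituteAndCopyFiles(
		"./image-manifests",              // assetInputDir
		"./output/manifests",             // assetOutputDir
		"TechPreviewNoUpgrade",           // featureSet
		"self-managed-high-availability", // clusterProfile
		nil,                              // templateData
	)
	if err != nil {
		klog.Fatal(err)
	}
}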
diff --git a/vendor/github.com/openshift/api/payload-command/render/write_featureset.go b/vendor/github.com/openshift/api/payload-command/render/write_featureset.go
new file mode 100644
index 0000000000..a17195f130
--- /dev/null
+++ b/vendor/github.com/openshift/api/payload-command/render/write_featureset.go
@@ -0,0 +1,89 @@
+package render
+
+import (
+ "flag"
+ "fmt"
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/api/features"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "os"
+ "path/filepath"
+)
+
+var (
+ clusterProfileToShortName = map[features.ClusterProfileName]string{
+ features.Hypershift: "Hypershift",
+ features.SelfManaged: "SelfManagedHA",
+ }
+)
+
+// WriteFeatureSets holds values to drive the render command.
+type WriteFeatureSets struct {
+ PayloadVersion string
+ AssetOutputDir string
+}
+
+func (o *WriteFeatureSets) AddFlags(fs *flag.FlagSet) {
+ fs.StringVar(&o.PayloadVersion, "payload-version", o.PayloadVersion, "Version that will eventually be placed into ClusterOperator.status. This normally comes from the CVO set via env var: OPERATOR_IMAGE_VERSION.")
+ fs.StringVar(&o.AssetOutputDir, "asset-output-dir", o.AssetOutputDir, "Output path for rendered manifests.")
+}
+
+// Validate verifies the inputs.
+func (o *WriteFeatureSets) Validate() error {
+ return nil
+}
+
+// Complete fills in missing values before command execution.
+func (o *WriteFeatureSets) Complete() error {
+ return nil
+}
+
+// Run contains the logic of the render command.
+func (o *WriteFeatureSets) Run() error {
+ err := os.MkdirAll(o.AssetOutputDir, 0755)
+ if err != nil {
+ return err
+ }
+
+ statusByClusterProfileByFeatureSet := features.AllFeatureSets()
+ for clusterProfile, byFeatureSet := range statusByClusterProfileByFeatureSet {
+ for featureSetName, featureGateStatuses := range byFeatureSet {
+ currentDetails := FeaturesGateDetailsFromFeatureSets(featureGateStatuses, o.PayloadVersion)
+
+ featureGateInstance := &configv1.FeatureGate{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster",
+ Annotations: map[string]string{
+ // we can't do this because it will get the manifest included by the CVO and that isn't what we want
+ // this makes it interesting to indicate which cluster-profile the cluster-config-operator should use
+ //string(clusterProfile): "true",
+ string(clusterProfile): "false-except-for-the-config-operator",
+ },
+ },
+ Spec: configv1.FeatureGateSpec{
+ FeatureGateSelection: configv1.FeatureGateSelection{
+ FeatureSet: featureSetName,
+ },
+ },
+ Status: configv1.FeatureGateStatus{
+ FeatureGates: []configv1.FeatureGateDetails{
+ *currentDetails,
+ },
+ },
+ }
+
+ featureGateOutBytes := writeFeatureGateV1OrDie(featureGateInstance)
+ featureSetFileName := fmt.Sprintf("featureGate-%s-%s.yaml", clusterProfileToShortName[clusterProfile], featureSetName)
+ if len(featureSetName) == 0 {
+ featureSetFileName = fmt.Sprintf("featureGate-%s-%s.yaml", clusterProfileToShortName[clusterProfile], "Default")
+ }
+
+ destFile := filepath.Join(o.AssetOutputDir, featureSetFileName)
+ if err := os.WriteFile(destFile, []byte(featureGateOutBytes), 0644); err != nil {
+ return fmt.Errorf("error writing FeatureGate manifest: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto
index 1797fe7702..621bec09b0 100644
--- a/vendor/github.com/openshift/api/route/v1/generated.proto
+++ b/vendor/github.com/openshift/api/route/v1/generated.proto
@@ -436,8 +436,12 @@ message TLSConfig {
// insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While
// each router may make its own decisions on which ports to expose, this is normally port 80.
//
- // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default).
- // * None - no traffic is allowed on the insecure port.
+ // If a route does not specify insecureEdgeTerminationPolicy, then the default behavior is "None".
+ //
+ // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only).
+ //
+ // * None - no traffic is allowed on the insecure port (default).
+ //
// * Redirect - clients are redirected to the secure port.
//
// +kubebuilder:validation:Enum=Allow;None;Redirect;""
diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go
index cd5e5eced4..fadc4b618b 100644
--- a/vendor/github.com/openshift/api/route/v1/types.go
+++ b/vendor/github.com/openshift/api/route/v1/types.go
@@ -447,8 +447,12 @@ type TLSConfig struct {
// insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While
// each router may make its own decisions on which ports to expose, this is normally port 80.
//
- // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default).
- // * None - no traffic is allowed on the insecure port.
+ // If a route does not specify insecureEdgeTerminationPolicy, then the default behavior is "None".
+ //
+ // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only).
+ //
+ // * None - no traffic is allowed on the insecure port (default).
+ //
// * Redirect - clients are redirected to the secure port.
//
// +kubebuilder:validation:Enum=Allow;None;Redirect;""
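The clarified route documentation separates the per-policy behavior from the default ("None" when the field is unset). A short sketch of setting the policy explicitly on a TLSConfig using the existing route/v1 types; the choice of edge termination with Redirect is just an example:

package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
)

func main() {
	// Redirect insecure (port 80) traffic to the secure port instead of relying on the default "None".
	tls := &routev1.TLSConfig{
		Termination:                   routev1.TLSTerminationEdge,
		InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
	}
	fmt.Println("policy:", tls.InsecureEdgeTerminationPolicy)
}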
diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
index c65815a1cc..56a4e23e3d 100644
--- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
@@ -178,7 +178,7 @@ var map_TLSConfig = map[string]string{
"key": "key provides key file contents",
"caCertificate": "caCertificate provides the cert authority certificate contents",
"destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.",
- "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80.\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port.",
+ "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80.\n\nIf a route does not specify insecureEdgeTerminationPolicy, then the default behavior is \"None\".\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only).\n\n* None - no traffic is allowed on the insecure port (default).\n\n* Redirect - clients are redirected to the secure port.",
"externalCertificate": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set.",
}
diff --git a/vendor/github.com/openshift/cluster-config-operator/LICENSE b/vendor/github.com/openshift/cluster-config-operator/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/vendor/github.com/openshift/cluster-config-operator/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/OWNERS b/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/OWNERS
deleted file mode 100644
index ba4afac20e..0000000000
--- a/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# https://github.com/openshift/installer/blob/75738a342c1973121eedda7d91096d21c19194c9/OWNERS_ALIASES#L47-L50
-
-reviewers:
-- deads2k
-- joelspeed
-approvers:
-# these are the api-approvers from openshift/api
-- deads2k
-- joelspeed
\ No newline at end of file
diff --git a/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/featuregate_controller.go b/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/featuregate_controller.go
deleted file mode 100644
index b819a068e4..0000000000
--- a/vendor/github.com/openshift/cluster-config-operator/pkg/operator/featuregates/featuregate_controller.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package featuregates
-
-import (
- "context"
- "fmt"
- "reflect"
- "sort"
- "strings"
- "time"
-
- configv1 "github.com/openshift/api/config/v1"
- configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
- v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
- configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
- "github.com/openshift/library-go/pkg/controller/factory"
- "github.com/openshift/library-go/pkg/operator/events"
- "github.com/openshift/library-go/pkg/operator/status"
- operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/sets"
-)
-
-const FeatureVersionName = "feature-gates"
-
-// FeatureGateController is responsible for setting usable FeatureGates on features.config.openshift.io/cluster
-type FeatureGateController struct {
- processVersion string
- featureGatesClient configv1client.FeatureGatesGetter
- featureGatesLister configlistersv1.FeatureGateLister
- clusterVersionLister configlistersv1.ClusterVersionLister
- // for unit testing
- featureSetMap map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled
-
- versionRecorder status.VersionGetter
- eventRecorder events.Recorder
-}
-
-// NewController returns a new FeatureGateController.
-func NewFeatureGateController(
- featureGateDetails map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled,
- operatorClient operatorv1helpers.OperatorClient,
- processVersion string,
- featureGatesClient configv1client.FeatureGatesGetter, featureGatesInformer v1.FeatureGateInformer,
- clusterVersionInformer v1.ClusterVersionInformer,
- versionRecorder status.VersionGetter,
- eventRecorder events.Recorder) factory.Controller {
- c := &FeatureGateController{
- processVersion: processVersion,
- featureGatesClient: featureGatesClient,
- featureGatesLister: featureGatesInformer.Lister(),
- clusterVersionLister: clusterVersionInformer.Lister(),
- featureSetMap: featureGateDetails,
- versionRecorder: versionRecorder,
- eventRecorder: eventRecorder,
- }
-
- return factory.New().
- WithInformers(
- operatorClient.Informer(),
- featureGatesInformer.Informer(),
- clusterVersionInformer.Informer(),
- ).
- WithSync(c.sync).
- WithSyncDegradedOnError(operatorClient).
- ResyncEvery(time.Minute).
- ToController("FeatureGateController", eventRecorder)
-}
-
-func (c FeatureGateController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
- featureGates, err := c.featureGatesLister.Get("cluster")
- if apierrors.IsNotFound(err) {
- return nil
- }
- if err != nil {
- return fmt.Errorf("unable to get FeatureGate: %w", err)
- }
-
- clusterVersion, err := c.clusterVersionLister.Get("version")
- if apierrors.IsNotFound(err) {
- return nil
- }
- if err != nil {
- return fmt.Errorf("unable to get ClusterVersion: %w", err)
- }
-
- knownVersions := sets.NewString(c.processVersion)
- for _, cvoVersion := range clusterVersion.Status.History {
- knownVersions.Insert(cvoVersion.Version)
- }
-
- currentDetails, err := FeaturesGateDetailsFromFeatureSets(c.featureSetMap, featureGates, c.processVersion)
- if err != nil {
- return fmt.Errorf("unable to determine FeatureGateDetails from FeatureSets: %w", err)
- }
- // desiredFeatureGates will include first, the current version's feature gates
- // then all the historical featuregates in order, removing those for versions not in the CVO history.
- desiredFeatureGates := []configv1.FeatureGateDetails{*currentDetails}
-
- for i := range featureGates.Status.FeatureGates {
- featureGateValues := featureGates.Status.FeatureGates[i]
- if featureGateValues.Version == c.processVersion {
- // we already added our processVersion
- continue
- }
- if !knownVersions.Has(featureGateValues.Version) {
- continue
- }
- desiredFeatureGates = append(desiredFeatureGates, featureGateValues)
- }
-
- if reflect.DeepEqual(desiredFeatureGates, featureGates.Status.FeatureGates) {
- // no update, confirm in the clusteroperator that the version has been achieved.
- c.versionRecorder.SetVersion(
- FeatureVersionName,
- c.processVersion,
- )
-
- return nil
- }
-
- // TODO, this looks ripe for SSA.
- toWrite := featureGates.DeepCopy()
- toWrite.Status.FeatureGates = desiredFeatureGates
- if _, err := c.featureGatesClient.FeatureGates().UpdateStatus(ctx, toWrite, metav1.UpdateOptions{}); err != nil {
- return fmt.Errorf("unable to update FeatureGate status: %w", err)
- }
-
- enabled, disabled := []string{}, []string{}
- for _, curr := range currentDetails.Enabled {
- enabled = append(enabled, string(curr.Name))
- }
- for _, curr := range currentDetails.Disabled {
- disabled = append(disabled, string(curr.Name))
- }
- c.eventRecorder.Eventf(
- "FeatureGateUpdate", "FeatureSet=%q, Version=%q, Enabled=%q, Disabled=%q",
- toWrite.Spec.FeatureSet, c.processVersion, strings.Join(enabled, ","), strings.Join(disabled, ","))
- // on successful write, we're at the correct level
- c.versionRecorder.SetVersion(
- FeatureVersionName,
- c.processVersion,
- )
-
- return nil
-}
-
-func featuresGatesFromFeatureSets(knownFeatureSets map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled, featureGates *configv1.FeatureGate) ([]configv1.FeatureGateName, []configv1.FeatureGateName, error) {
- if featureGates.Spec.FeatureSet == configv1.CustomNoUpgrade {
- if featureGates.Spec.FeatureGateSelection.CustomNoUpgrade != nil {
- completeEnabled, completeDisabled := completeFeatureGates(knownFeatureSets, featureGates.Spec.FeatureGateSelection.CustomNoUpgrade.Enabled, featureGates.Spec.FeatureGateSelection.CustomNoUpgrade.Disabled)
- return completeEnabled, completeDisabled, nil
- }
- return []configv1.FeatureGateName{}, []configv1.FeatureGateName{}, nil
- }
-
- featureSet, ok := knownFeatureSets[featureGates.Spec.FeatureSet]
- if !ok {
- return []configv1.FeatureGateName{}, []configv1.FeatureGateName{}, fmt.Errorf(".spec.featureSet %q not found", featureSet)
- }
-
- completeEnabled, completeDisabled := completeFeatureGates(knownFeatureSets, toFeatureGateNames(featureSet.Enabled), toFeatureGateNames(featureSet.Disabled))
- return completeEnabled, completeDisabled, nil
-}
-
-func toFeatureGateNames(in []configv1.FeatureGateDescription) []configv1.FeatureGateName {
- out := []configv1.FeatureGateName{}
- for _, curr := range in {
- out = append(out, curr.FeatureGateAttributes.Name)
- }
-
- return out
-}
-
-// completeFeatureGates identifies every known feature and ensures that is explicitly on or explicitly off
-func completeFeatureGates(knownFeatureSets map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled, enabled, disabled []configv1.FeatureGateName) ([]configv1.FeatureGateName, []configv1.FeatureGateName) {
- specificallyEnabledFeatureGates := sets.New[configv1.FeatureGateName]()
- specificallyEnabledFeatureGates.Insert(enabled...)
-
- knownFeatureGates := sets.New[configv1.FeatureGateName]()
- knownFeatureGates.Insert(enabled...)
- knownFeatureGates.Insert(disabled...)
- for _, known := range knownFeatureSets {
- for _, curr := range known.Disabled {
- knownFeatureGates.Insert(curr.FeatureGateAttributes.Name)
- }
- for _, curr := range known.Enabled {
- knownFeatureGates.Insert(curr.FeatureGateAttributes.Name)
- }
- }
-
- return enabled, knownFeatureGates.Difference(specificallyEnabledFeatureGates).UnsortedList()
-}
-
-func FeaturesGateDetailsFromFeatureSets(featureSetMap map[configv1.FeatureSet]*configv1.FeatureGateEnabledDisabled, featureGates *configv1.FeatureGate, currentVersion string) (*configv1.FeatureGateDetails, error) {
- enabled, disabled, err := featuresGatesFromFeatureSets(featureSetMap, featureGates)
- if err != nil {
- return nil, err
- }
- currentDetails := configv1.FeatureGateDetails{
- Version: currentVersion,
- }
- for _, gateName := range enabled {
- currentDetails.Enabled = append(currentDetails.Enabled, configv1.FeatureGateAttributes{
- Name: gateName,
- })
- }
- for _, gateName := range disabled {
- currentDetails.Disabled = append(currentDetails.Disabled, configv1.FeatureGateAttributes{
- Name: gateName,
- })
- }
-
- // sort for stability
- sort.Sort(byName(currentDetails.Enabled))
- sort.Sort(byName(currentDetails.Disabled))
-
- return &currentDetails, nil
-}
-
-type byName []configv1.FeatureGateAttributes
-
-func (a byName) Len() int { return len(a) }
-func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byName) Less(i, j int) bool {
- if strings.Compare(string(a[i].Name), string(a[j].Name)) < 0 {
- return true
- }
- return false
-}
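The removed vendored FeatureGateController normalizes the cluster's feature-gate selection so that every gate known to any feature set ends up explicitly enabled or explicitly disabled. A minimal standalone sketch of that completion step (simplified to plain string sets, with hypothetical gate names) follows:

package main

import (
	"fmt"
	"sort"
)

// completeGates mirrors the idea behind the deleted completeFeatureGates helper:
// collect every gate known to any feature set, keep the explicitly enabled ones,
// and report everything else as disabled.
func completeGates(knownSets map[string][]string, enabled []string) (on, off []string) {
	enabledSet := map[string]bool{}
	for _, g := range enabled {
		enabledSet[g] = true
	}
	known := map[string]bool{}
	for _, gates := range knownSets {
		for _, g := range gates {
			known[g] = true
		}
	}
	for _, g := range enabled {
		known[g] = true
	}
	for g := range known {
		if enabledSet[g] {
			on = append(on, g)
		} else {
			off = append(off, g)
		}
	}
	sort.Strings(on) // the real controller also sorts for stable status output
	sort.Strings(off)
	return on, off
}

func main() {
	knownSets := map[string][]string{
		"TechPreviewNoUpgrade": {"PinnedImages", "MachineConfigNodes", "ExternalCloudProvider"},
	}
	on, off := completeGates(knownSets, []string{"ExternalCloudProvider"})
	fmt.Println("enabled:", on)   // enabled: [ExternalCloudProvider]
	fmt.Println("disabled:", off) // disabled: [MachineConfigNodes PinnedImages]
}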
diff --git a/vendor/github.com/openshift/library-go/pkg/cloudprovider/external.go b/vendor/github.com/openshift/library-go/pkg/cloudprovider/external.go
index 9af2688154..6a459f9f56 100644
--- a/vendor/github.com/openshift/library-go/pkg/cloudprovider/external.go
+++ b/vendor/github.com/openshift/library-go/pkg/cloudprovider/external.go
@@ -4,22 +4,23 @@ import (
"fmt"
configv1 "github.com/openshift/api/config/v1"
+ features "github.com/openshift/api/features"
)
var (
// ExternalCloudProviderFeature is the name of the external cloud provider feature gate.
// This is used to flag to operators that the cluster should be using the external cloud-controller-manager
// rather than the in-tree cloud controller loops.
- ExternalCloudProviderFeature = configv1.FeatureGateExternalCloudProvider
+ ExternalCloudProviderFeature = features.FeatureGateExternalCloudProvider
// ExternalCloudProviderFeatureAzure is the name of the external cloud provider feature gate for Azure.
- ExternalCloudProviderFeatureAzure = configv1.FeatureGateExternalCloudProviderAzure
+ ExternalCloudProviderFeatureAzure = features.FeatureGateExternalCloudProviderAzure
// ExternalCloudProviderFeatureGCP is the name of the external cloud provider feature gate for GCP.
- ExternalCloudProviderFeatureGCP = configv1.FeatureGateExternalCloudProviderGCP
+ ExternalCloudProviderFeatureGCP = features.FeatureGateExternalCloudProviderGCP
// ExternalCloudProviderFeatureExternal is the name of the external cloud provider feature gate for External platform.
- ExternalCloudProviderFeatureExternal = configv1.FeatureGateExternalCloudProviderExternal
+ ExternalCloudProviderFeatureExternal = features.FeatureGateExternalCloudProviderExternal
)
// IsCloudProviderExternal is used to check whether external cloud provider settings should be used in a component.
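The cloudprovider aliases keep their names, so existing callers compile unchanged; only the package backing the constants moves from configv1 to the new features package. A quick sanity check of that equivalence, assuming the openshift/api revision pinned in vendor/modules.txt below still exports these gates:

package main

import (
	"fmt"

	features "github.com/openshift/api/features"
	"github.com/openshift/library-go/pkg/cloudprovider"
)

func main() {
	// The re-exported alias and the features-package constant are the same FeatureGateName value.
	fmt.Println(cloudprovider.ExternalCloudProviderFeature == features.FeatureGateExternalCloudProvider) // true
}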
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go
deleted file mode 100644
index 56e1496b85..0000000000
--- a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package status
-
-import (
- "fmt"
- "sort"
- "strings"
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- configv1 "github.com/openshift/api/config/v1"
- operatorv1 "github.com/openshift/api/operator/v1"
-)
-
-// UnionCondition returns a single operator condition that is the union of multiple operator conditions.
-//
-// defaultConditionStatus indicates whether you want to merge all Falses or merge all Trues. For instance, Failures merge
-// on true, but Available merges on false. Thing of it like an anti-default.
-//
-// If inertia is non-nil, then resist returning a condition with a status opposite the defaultConditionStatus.
-func UnionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, inertia Inertia, allConditions ...operatorv1.OperatorCondition) operatorv1.OperatorCondition {
- var oppositeConditionStatus operatorv1.ConditionStatus
- if defaultConditionStatus == operatorv1.ConditionTrue {
- oppositeConditionStatus = operatorv1.ConditionFalse
- } else {
- oppositeConditionStatus = operatorv1.ConditionTrue
- }
-
- interestingConditions := []operatorv1.OperatorCondition{}
- badConditions := []operatorv1.OperatorCondition{}
- badConditionStatus := operatorv1.ConditionUnknown
- for _, condition := range allConditions {
- if strings.HasSuffix(condition.Type, conditionType) {
- interestingConditions = append(interestingConditions, condition)
-
- if condition.Status != defaultConditionStatus {
- badConditions = append(badConditions, condition)
- if condition.Status == oppositeConditionStatus {
- badConditionStatus = oppositeConditionStatus
- }
- }
- }
- }
- sort.Sort(byConditionType(badConditions))
-
- unionedCondition := operatorv1.OperatorCondition{Type: conditionType, Status: operatorv1.ConditionUnknown}
- if len(interestingConditions) == 0 {
- unionedCondition.Status = operatorv1.ConditionUnknown
- unionedCondition.Reason = "NoData"
- return unionedCondition
- }
-
- var elderBadConditions []operatorv1.OperatorCondition
- if inertia == nil {
- elderBadConditions = badConditions
- } else {
- now := time.Now()
- for _, condition := range badConditions {
- if condition.LastTransitionTime.Time.Before(now.Add(-inertia(condition))) {
- elderBadConditions = append(elderBadConditions, condition)
- }
- }
- }
-
- if len(elderBadConditions) == 0 {
- unionedCondition.Status = defaultConditionStatus
- unionedCondition.Message = unionMessage(interestingConditions)
- if unionedCondition.Message == "" {
- unionedCondition.Message = "All is well"
- }
- unionedCondition.Reason = "AsExpected"
- unionedCondition.LastTransitionTime = latestTransitionTime(interestingConditions)
-
- return unionedCondition
- }
-
- // at this point we have bad conditions
- unionedCondition.Status = badConditionStatus
- unionedCondition.Message = unionMessage(badConditions)
- unionedCondition.Reason = unionReason(conditionType, badConditions)
- unionedCondition.LastTransitionTime = latestTransitionTime(badConditions)
-
- return unionedCondition
-}
-
-// UnionClusterCondition returns a single cluster operator condition that is the union of multiple operator conditions.
-//
-// defaultConditionStatus indicates whether you want to merge all Falses or merge all Trues. For instance, Failures merge
-// on true, but Available merges on false. Thing of it like an anti-default.
-//
-// If inertia is non-nil, then resist returning a condition with a status opposite the defaultConditionStatus.
-func UnionClusterCondition(conditionType configv1.ClusterStatusConditionType, defaultConditionStatus operatorv1.ConditionStatus, inertia Inertia, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition {
- cnd := UnionCondition(string(conditionType), defaultConditionStatus, inertia, allConditions...)
- return OperatorConditionToClusterOperatorCondition(cnd)
-}
-
-func OperatorConditionToClusterOperatorCondition(condition operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition {
- return configv1.ClusterOperatorStatusCondition{
- Type: configv1.ClusterStatusConditionType(condition.Type),
- Status: configv1.ConditionStatus(condition.Status),
- LastTransitionTime: condition.LastTransitionTime,
- Reason: condition.Reason,
- Message: condition.Message,
- }
-}
-func latestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time {
- latestTransitionTime := metav1.Time{}
- for _, condition := range conditions {
- if latestTransitionTime.Before(&condition.LastTransitionTime) {
- latestTransitionTime = condition.LastTransitionTime
- }
- }
- return latestTransitionTime
-}
-
-func uniq(s []string) []string {
- seen := make(map[string]struct{}, len(s))
- j := 0
- for _, v := range s {
- if _, ok := seen[v]; ok {
- continue
- }
- seen[v] = struct{}{}
- s[j] = v
- j++
- }
- return s[:j]
-}
-
-func unionMessage(conditions []operatorv1.OperatorCondition) string {
- messages := []string{}
- for _, condition := range conditions {
- if len(condition.Message) == 0 {
- continue
- }
- for _, message := range uniq(strings.Split(condition.Message, "\n")) {
- messages = append(messages, fmt.Sprintf("%s: %s", condition.Type, message))
- }
- }
- return strings.Join(messages, "\n")
-}
-
-func unionReason(unionConditionType string, conditions []operatorv1.OperatorCondition) string {
- typeReasons := []string{}
- for _, curr := range conditions {
- currType := curr.Type[:len(curr.Type)-len(unionConditionType)]
- if len(curr.Reason) > 0 {
- typeReasons = append(typeReasons, currType+"_"+curr.Reason)
- } else {
- typeReasons = append(typeReasons, currType)
- }
- }
- sort.Strings(typeReasons)
- return strings.Join(typeReasons, "::")
-}
-
-type byConditionType []operatorv1.OperatorCondition
-
-var _ sort.Interface = byConditionType{}
-
-func (s byConditionType) Len() int {
- return len(s)
-}
-func (s byConditionType) Less(i, j int) bool {
- return s[i].Type < s[j].Type
-}
-func (s byConditionType) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
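This vendored copy of library-go's condition-union helpers is dropped along with the rest of the status package; the code remains available upstream. For reference, a rough usage sketch of what UnionCondition computes for a Degraded-style merge (the condition type names are hypothetical):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	"github.com/openshift/library-go/pkg/operator/status"
)

func main() {
	// Degraded merges on ConditionTrue: any per-component *Degraded condition that is
	// True flips the union to True (no inertia filtering when the Inertia func is nil).
	union := status.UnionCondition("Degraded", operatorv1.ConditionFalse, nil,
		operatorv1.OperatorCondition{Type: "NodeControllerDegraded", Status: operatorv1.ConditionFalse},
		operatorv1.OperatorCondition{Type: "RenderDegraded", Status: operatorv1.ConditionTrue,
			Reason: "RenderFailed", Message: "bad template"},
	)
	fmt.Println(union.Status, union.Reason) // True Render_RenderFailed
}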
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/inertia.go b/vendor/github.com/openshift/library-go/pkg/operator/status/inertia.go
deleted file mode 100644
index 3ac29082e0..0000000000
--- a/vendor/github.com/openshift/library-go/pkg/operator/status/inertia.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package status
-
-import (
- "fmt"
- "regexp"
- "time"
-
- operatorv1 "github.com/openshift/api/operator/v1"
-)
-
-// Inertia returns the inertial duration for the given condition.
-type Inertia func(condition operatorv1.OperatorCondition) time.Duration
-
-// InertiaCondition configures an inertia duration for a given set of
-// condition types.
-type InertiaCondition struct {
- // ConditionTypeMatcher is a regular expression selecting condition types
- // with which this InertiaCondition is associated.
- ConditionTypeMatcher *regexp.Regexp
-
- // Duration is the inertial duration for associated conditions.
- Duration time.Duration
-}
-
-// InertiaConfig holds configuration for an Inertia implementation.
-type InertiaConfig struct {
- defaultDuration time.Duration
- conditions []InertiaCondition
-}
-
-// NewInertia creates a new InertiaConfig object. Conditions are
-// applied in the given order, so a condition type matching multiple
-// regular expressions will have the duration associated with the first
-// matching entry.
-func NewInertia(defaultDuration time.Duration, conditions ...InertiaCondition) (*InertiaConfig, error) {
- for i, condition := range conditions {
- if condition.ConditionTypeMatcher == nil {
- return nil, fmt.Errorf("condition %d has a nil ConditionTypeMatcher", i)
- }
- }
-
- return &InertiaConfig{
- defaultDuration: defaultDuration,
- conditions: conditions,
- }, nil
-}
-
-// MustNewInertia is like NewInertia but panics on error.
-func MustNewInertia(defaultDuration time.Duration, conditions ...InertiaCondition) *InertiaConfig {
- inertia, err := NewInertia(defaultDuration, conditions...)
- if err != nil {
- panic(err)
- }
-
- return inertia
-}
-
-// Inertia returns the configured inertia for the given condition type.
-func (c *InertiaConfig) Inertia(condition operatorv1.OperatorCondition) time.Duration {
- for _, matcher := range c.conditions {
- if matcher.ConditionTypeMatcher.MatchString(condition.Type) {
- return matcher.Duration
- }
- }
- return c.defaultDuration
-}
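The inertia helpers removed here let callers delay flipping a union condition; a short usage sketch against the upstream library-go package, with a hypothetical matcher and durations:

package main

import (
	"fmt"
	"regexp"
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	"github.com/openshift/library-go/pkg/operator/status"
)

func main() {
	// Default inertia of 2 minutes, but hold Node*Degraded conditions for 10 minutes.
	inertia := status.MustNewInertia(2*time.Minute,
		status.InertiaCondition{
			ConditionTypeMatcher: regexp.MustCompile(`^Node.*Degraded$`),
			Duration:             10 * time.Minute,
		},
	)
	fmt.Println(inertia.Inertia(operatorv1.OperatorCondition{Type: "NodeControllerDegraded"})) // 10m0s
	fmt.Println(inertia.Inertia(operatorv1.OperatorCondition{Type: "RenderDegraded"}))         // 2m0s
}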
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go
deleted file mode 100644
index 864f1e43d4..0000000000
--- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package status
-
-import (
- "context"
- "strings"
- "time"
-
- "k8s.io/klog/v2"
-
- configv1 "github.com/openshift/api/config/v1"
- operatorv1 "github.com/openshift/api/operator/v1"
- configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
- configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1"
- configv1listers "github.com/openshift/client-go/config/listers/config/v1"
- "k8s.io/apimachinery/pkg/api/equality"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-
- configv1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
- "github.com/openshift/library-go/pkg/controller/factory"
- "github.com/openshift/library-go/pkg/operator/events"
- "github.com/openshift/library-go/pkg/operator/management"
- "github.com/openshift/library-go/pkg/operator/resource/resourceapply"
- operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
-)
-
-type VersionGetter interface {
- // SetVersion is a way to set the version for an operand. It must be thread-safe
- SetVersion(operandName, version string)
- // GetVersion is way to get the versions for all operands. It must be thread-safe and return an object that doesn't mutate
- GetVersions() map[string]string
- // VersionChangedChannel is a channel that will get an item whenever SetVersion has been called
- VersionChangedChannel() <-chan struct{}
-}
-
-type RelatedObjectsFunc func() (isset bool, objs []configv1.ObjectReference)
-
-type StatusSyncer struct {
- clusterOperatorName string
- relatedObjects []configv1.ObjectReference
- relatedObjectsFunc RelatedObjectsFunc
-
- versionGetter VersionGetter
- operatorClient operatorv1helpers.OperatorClient
- clusterOperatorClient configv1client.ClusterOperatorsGetter
- clusterOperatorLister configv1listers.ClusterOperatorLister
-
- controllerFactory *factory.Factory
- recorder events.Recorder
- degradedInertia Inertia
-
- removeUnusedVersions bool
-}
-
-var _ factory.Controller = &StatusSyncer{}
-
-func (c *StatusSyncer) Name() string {
- return c.clusterOperatorName
-}
-
-func NewClusterOperatorStatusController(
- name string,
- relatedObjects []configv1.ObjectReference,
- clusterOperatorClient configv1client.ClusterOperatorsGetter,
- clusterOperatorInformer configv1informers.ClusterOperatorInformer,
- operatorClient operatorv1helpers.OperatorClient,
- versionGetter VersionGetter,
- recorder events.Recorder,
-) *StatusSyncer {
- return &StatusSyncer{
- clusterOperatorName: name,
- relatedObjects: relatedObjects,
- versionGetter: versionGetter,
- clusterOperatorClient: clusterOperatorClient,
- clusterOperatorLister: clusterOperatorInformer.Lister(),
- operatorClient: operatorClient,
- degradedInertia: MustNewInertia(2 * time.Minute).Inertia,
- controllerFactory: factory.New().ResyncEvery(time.Minute).WithInformers(
- operatorClient.Informer(),
- clusterOperatorInformer.Informer(),
- ),
- recorder: recorder.WithComponentSuffix("status-controller"),
- }
-}
-
-// WithRelatedObjectsFunc allows the set of related objects to be dynamically
-// determined.
-//
-// The function returns (isset, objects)
-//
-// If isset is false, then the set of related objects is copied over from the
-// existing ClusterOperator object. This is useful in cases where an operator
-// has just restarted, and hasn't yet reconciled.
-//
-// Any statically-defined related objects (in NewClusterOperatorStatusController)
-// will always be included in the result.
-func (c *StatusSyncer) WithRelatedObjectsFunc(f RelatedObjectsFunc) {
- c.relatedObjectsFunc = f
-}
-
-func (c *StatusSyncer) Run(ctx context.Context, workers int) {
- c.controllerFactory.WithPostStartHooks(c.watchVersionGetterPostRunHook).WithSync(c.Sync).ToController("StatusSyncer_"+c.Name(), c.recorder).Run(ctx, workers)
-}
-
-// WithDegradedInertia returns a copy of the StatusSyncer with the
-// requested inertia function for degraded conditions.
-func (c *StatusSyncer) WithDegradedInertia(inertia Inertia) *StatusSyncer {
- output := *c
- output.degradedInertia = inertia
- return &output
-}
-
-// WithVersionRemoval returns a copy of the StatusSyncer that will
-// remove versions that are missing in VersionGetter from the status.
-func (c *StatusSyncer) WithVersionRemoval() *StatusSyncer {
- output := *c
- output.removeUnusedVersions = true
- return &output
-}
-
-// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This
-// must be information that is logically "owned" by another component.
-func (c StatusSyncer) Sync(ctx context.Context, syncCtx factory.SyncContext) error {
- detailedSpec, currentDetailedStatus, _, err := c.operatorClient.GetOperatorState()
- if apierrors.IsNotFound(err) {
- syncCtx.Recorder().Warningf("StatusNotFound", "Unable to determine current operator status for clusteroperator/%s", c.clusterOperatorName)
- if err := c.clusterOperatorClient.ClusterOperators().Delete(ctx, c.clusterOperatorName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
- return err
- }
- return nil
- }
- if err != nil {
- return err
- }
-
- originalClusterOperatorObj, err := c.clusterOperatorLister.Get(c.clusterOperatorName)
- if err != nil && !apierrors.IsNotFound(err) {
- syncCtx.Recorder().Warningf("StatusFailed", "Unable to get current operator status for clusteroperator/%s: %v", c.clusterOperatorName, err)
- return err
- }
-
- // ensure that we have a clusteroperator resource
- if originalClusterOperatorObj == nil || apierrors.IsNotFound(err) {
- klog.Infof("clusteroperator/%s not found", c.clusterOperatorName)
- var createErr error
- originalClusterOperatorObj, createErr = c.clusterOperatorClient.ClusterOperators().Create(ctx, &configv1.ClusterOperator{
- ObjectMeta: metav1.ObjectMeta{Name: c.clusterOperatorName},
- }, metav1.CreateOptions{})
- if apierrors.IsNotFound(createErr) {
- // this means that the API isn't present. We did not fail. Try again later
- klog.Infof("ClusterOperator API not created")
- syncCtx.Queue().AddRateLimited(factory.DefaultQueueKey)
- return nil
- }
- if createErr != nil {
- syncCtx.Recorder().Warningf("StatusCreateFailed", "Failed to create operator status: %v", createErr)
- return createErr
- }
- }
- clusterOperatorObj := originalClusterOperatorObj.DeepCopy()
-
- if detailedSpec.ManagementState == operatorv1.Unmanaged && !management.IsOperatorAlwaysManaged() {
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"})
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"})
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"})
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"})
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.EvaluationConditionsDetected, Status: configv1.ConditionUnknown, Reason: "Unmanaged"})
-
- if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) {
- return nil
- }
- if _, err := c.clusterOperatorClient.ClusterOperators().UpdateStatus(ctx, clusterOperatorObj, metav1.UpdateOptions{}); err != nil {
- return err
- }
- if !skipOperatorStatusChangedEvent(originalClusterOperatorObj.Status, clusterOperatorObj.Status) {
- syncCtx.Recorder().Eventf("OperatorStatusChanged", "Status for operator %s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status))
- }
- return nil
- }
-
- if c.relatedObjectsFunc != nil {
- isSet, ro := c.relatedObjectsFunc()
- if !isSet { // temporarily unknown - copy over from existing object
- ro = clusterOperatorObj.Status.RelatedObjects
- }
-
- // merge in any static objects
- for _, obj := range c.relatedObjects {
- found := false
- for _, existingObj := range ro {
- if obj == existingObj {
- found = true
- break
- }
- }
- if !found {
- ro = append(ro, obj)
- }
- }
- clusterOperatorObj.Status.RelatedObjects = ro
- } else {
- clusterOperatorObj.Status.RelatedObjects = c.relatedObjects
- }
-
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorDegraded, operatorv1.ConditionFalse, c.degradedInertia, currentDetailedStatus.Conditions...))
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorProgressing, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...))
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorAvailable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...))
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.OperatorUpgradeable, operatorv1.ConditionTrue, nil, currentDetailedStatus.Conditions...))
- configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, UnionClusterCondition(configv1.EvaluationConditionsDetected, operatorv1.ConditionFalse, nil, currentDetailedStatus.Conditions...))
-
- c.syncStatusVersions(clusterOperatorObj, syncCtx)
-
- // if we have no diff, just return
- if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) {
- return nil
- }
- klog.V(2).Infof("clusteroperator/%s diff %v", c.clusterOperatorName, resourceapply.JSONPatchNoError(originalClusterOperatorObj, clusterOperatorObj))
-
- if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(ctx, clusterOperatorObj, metav1.UpdateOptions{}); updateErr != nil {
- return updateErr
- }
- if !skipOperatorStatusChangedEvent(originalClusterOperatorObj.Status, clusterOperatorObj.Status) {
- syncCtx.Recorder().Eventf("OperatorStatusChanged", "Status for clusteroperator/%s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status))
- }
- return nil
-}
-
-func skipOperatorStatusChangedEvent(originalStatus, newStatus configv1.ClusterOperatorStatus) bool {
- originalCopy := *originalStatus.DeepCopy()
- for i, condition := range originalCopy.Conditions {
- switch condition.Type {
- case configv1.OperatorAvailable, configv1.OperatorDegraded, configv1.OperatorProgressing, configv1.OperatorUpgradeable:
- originalCopy.Conditions[i].Message = strings.TrimPrefix(condition.Message, "\ufeff")
- }
- }
- return len(configv1helpers.GetStatusDiff(originalCopy, newStatus)) == 0
-}
-
-func (c *StatusSyncer) syncStatusVersions(clusterOperatorObj *configv1.ClusterOperator, syncCtx factory.SyncContext) {
- versions := c.versionGetter.GetVersions()
- // Add new versions from versionGetter to status
- for operand, version := range versions {
- previousVersion := operatorv1helpers.SetOperandVersion(&clusterOperatorObj.Status.Versions, configv1.OperandVersion{Name: operand, Version: version})
- if previousVersion != version {
- // having this message will give us a marker in events when the operator updated compared to when the operand is updated
- syncCtx.Recorder().Eventf("OperatorVersionChanged", "clusteroperator/%s version %q changed from %q to %q", c.clusterOperatorName, operand, previousVersion, version)
- }
- }
-
- if !c.removeUnusedVersions {
- return
- }
-
- // Filter out all versions from status that are not in versionGetter
- filteredVersions := make([]configv1.OperandVersion, 0, len(clusterOperatorObj.Status.Versions))
- for _, version := range clusterOperatorObj.Status.Versions {
- if _, found := versions[version.Name]; found {
- filteredVersions = append(filteredVersions, version)
- }
- }
-
- clusterOperatorObj.Status.Versions = filteredVersions
-}
-
-func (c *StatusSyncer) watchVersionGetterPostRunHook(ctx context.Context, syncCtx factory.SyncContext) error {
- defer utilruntime.HandleCrash()
-
- versionCh := c.versionGetter.VersionChangedChannel()
- // always kick at least once
- syncCtx.Queue().Add(factory.DefaultQueueKey)
-
- for {
- select {
- case <-ctx.Done():
- return nil
- case <-versionCh:
- syncCtx.Queue().Add(factory.DefaultQueueKey)
- }
- }
-}
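One detail worth keeping in mind from the removed StatusSyncer: when a RelatedObjectsFunc is set, the dynamically reported references are taken as the base and the statically configured ones are appended only if missing. A simplified, dependency-free sketch of that merge (the types and object names here are illustrative, not the library-go API):

package main

import "fmt"

type objRef struct{ Group, Resource, Namespace, Name string }

// mergeRelatedObjects keeps the dynamic references and appends any static
// reference that is not already present, mirroring the deleted sync logic.
func mergeRelatedObjects(dynamic, static []objRef) []objRef {
	out := append([]objRef{}, dynamic...)
	for _, s := range static {
		found := false
		for _, d := range out {
			if s == d {
				found = true
				break
			}
		}
		if !found {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	dynamic := []objRef{{Resource: "namespaces", Name: "openshift-machine-config-operator"}}
	static := []objRef{
		{Resource: "namespaces", Name: "openshift-machine-config-operator"},
		{Group: "machineconfiguration.openshift.io", Resource: "machineconfigpools"},
	}
	fmt.Println(mergeRelatedObjects(dynamic, static)) // the duplicate namespace is not added twice
}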
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go
deleted file mode 100644
index 48046d963c..0000000000
--- a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package status
-
-import (
- "context"
- "os"
- "sync"
-
- corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-
- "github.com/openshift/library-go/pkg/operator/events"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-type versionGetter struct {
- lock sync.Mutex
- versions map[string]string
- notificationChannels []chan struct{}
-}
-
-const (
- operandImageEnvVarName = "IMAGE"
- operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION"
- operatorImageVersionEnvVarName = "OPERATOR_IMAGE_VERSION"
-)
-
-func NewVersionGetter() VersionGetter {
- return &versionGetter{
- versions: map[string]string{},
- }
-}
-
-func (v *versionGetter) SetVersion(operandName, version string) {
- v.lock.Lock()
- defer v.lock.Unlock()
-
- v.versions[operandName] = version
-
- for i := range v.notificationChannels {
- ch := v.notificationChannels[i]
- // don't let a slow consumer block the rest
- go func() {
- ch <- struct{}{}
- }()
- }
-}
-
-func (v *versionGetter) GetVersions() map[string]string {
- v.lock.Lock()
- defer v.lock.Unlock()
-
- ret := map[string]string{}
- for k, v := range v.versions {
- ret[k] = v
- }
- return ret
-}
-
-func (v *versionGetter) VersionChangedChannel() <-chan struct{} {
- v.lock.Lock()
- defer v.lock.Unlock()
-
- channel := make(chan struct{}, 50)
- v.notificationChannels = append(v.notificationChannels, channel)
- return channel
-}
-
-func ImageForOperandFromEnv() string {
- return os.Getenv(operandImageEnvVarName)
-}
-
-func VersionForOperandFromEnv() string {
- return os.Getenv(operandImageVersionEnvVarName)
-}
-
-func VersionForOperatorFromEnv() string {
- return os.Getenv(operatorImageVersionEnvVarName)
-}
-
-func VersionForOperand(namespace, imagePullSpec string, configMapGetter corev1client.ConfigMapsGetter, eventRecorder events.Recorder) string {
- versionMap := map[string]string{}
- versionMapping, err := configMapGetter.ConfigMaps(namespace).Get(context.TODO(), "version-mapping", metav1.GetOptions{})
- if err != nil && !apierrors.IsNotFound(err) {
- eventRecorder.Warningf("VersionMappingFailure", "unable to get version mapping: %v", err)
- return ""
- }
- if versionMapping != nil {
- for version, image := range versionMapping.Data {
- versionMap[image] = version
- }
- }
-
- // we have the actual daemonset and we need the pull spec
- operandVersion := versionMap[imagePullSpec]
- return operandVersion
-}
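The version getter removed with the rest of the status package is a small thread-safe map plus change notification; a usage sketch against the upstream package (the operand name and version string are hypothetical):

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/status"
)

func main() {
	vg := status.NewVersionGetter()
	ch := vg.VersionChangedChannel() // register before setting, so the notification is not missed

	vg.SetVersion("operator", "4.16.0")
	<-ch // SetVersion notifies each registered channel from a goroutine

	fmt.Println(vg.GetVersions()) // map[operator:4.16.0]
}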
diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh
old mode 100644
new mode 100755
diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh
old mode 100644
new mode 100755
diff --git a/vendor/modules.txt b/vendor/modules.txt
index eb7a385339..dbd4c0f5be 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -818,7 +818,7 @@ github.com/opencontainers/runc/libcontainer/user
# github.com/opencontainers/runtime-spec v1.1.0
## explicit
github.com/opencontainers/runtime-spec/specs-go
-# github.com/openshift/api v0.0.0-20240422085825-2624175e9673
+# github.com/openshift/api v0.0.0-20240425081546-8203151f085f
## explicit; go 1.21
github.com/openshift/api
github.com/openshift/api/annotations
@@ -840,6 +840,7 @@ github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests
github.com/openshift/api/console
github.com/openshift/api/console/v1
github.com/openshift/api/console/v1alpha1
+github.com/openshift/api/features
github.com/openshift/api/helm
github.com/openshift/api/helm/v1beta1
github.com/openshift/api/image
@@ -879,6 +880,8 @@ github.com/openshift/api/operatorcontrolplane
github.com/openshift/api/operatorcontrolplane/v1alpha1
github.com/openshift/api/osin
github.com/openshift/api/osin/v1
+github.com/openshift/api/payload-command/render
+github.com/openshift/api/payload-command/render/renderassets
github.com/openshift/api/pkg/serialization
github.com/openshift/api/project
github.com/openshift/api/project/v1
@@ -982,10 +985,7 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1
github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1
github.com/openshift/client-go/operator/listers/operator/v1
github.com/openshift/client-go/operator/listers/operator/v1alpha1
-# github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe
-## explicit; go 1.20
-github.com/openshift/cluster-config-operator/pkg/operator/featuregates
-# github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528
+# github.com/openshift/library-go v0.0.0-20240424194921-cb8aac942b79
## explicit; go 1.21
github.com/openshift/library-go/pkg/certs
github.com/openshift/library-go/pkg/cloudprovider
@@ -1005,7 +1005,6 @@ github.com/openshift/library-go/pkg/operator/resource/resourcehelper
github.com/openshift/library-go/pkg/operator/resource/resourcemerge
github.com/openshift/library-go/pkg/operator/resource/resourceread
github.com/openshift/library-go/pkg/operator/resourcesynccontroller
-github.com/openshift/library-go/pkg/operator/status
github.com/openshift/library-go/pkg/operator/v1helpers
# github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b
## explicit; go 1.18